/*
 *  Virtual page mapping
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "hw/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_UNASSIGNED
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;
DMAContext dma_context_memory;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *, cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

#if !defined(CONFIG_USER_ONLY)

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

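/*
 * The physical memory map is a multi-level radix tree: each non-leaf
 * PhysPageEntry holds the index of a node in phys_map_nodes[], each leaf
 * holds an index into phys_sections[].  The helpers below grow the node
 * pool on demand; nodes are only ever freed all at once, via
 * phys_map_nodes_reset().
 */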
static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

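/*
 * Walk one level of the map, allocating nodes on demand, and mark the run
 * [*index, *index + *nb) with the given section leaf.  A subrange that is
 * aligned to this level's step and at least one whole step long becomes a
 * leaf here; anything smaller recurses one level down.
 */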
static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

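/*
 * Look up the section covering a page: descend from the root, following
 * node indices until a leaf is reached.  A missing node resolves to the
 * unassigned section rather than failing.
 */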
MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUArchState *qemu_get_cpu(int cpu)
{
    CPUArchState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu) {
            break;
        }
        env = env->next_cpu;
    }

    return env;
}

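/* Register a new vCPU: assign it the next free cpu_index, append it to the
   global first_cpu list, and hook up its savevm/vmstate handlers. */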
void cpu_exec_init(CPUArchState *env)
{
#ifndef CONFIG_USER_ONLY
    CPUState *cpu = ENV_GET_CPU(env);
#endif
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
                            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
        len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
    }

    tlb_flush_page(env, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}

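/*
 * Usage sketch (hypothetical values): a GDB-style 4-byte write watch at
 * guest address 0x1000 would be installed with
 *
 *     CPUWatchpoint *wp;
 *     cpu_watchpoint_insert(env, 0x1000, 4, BP_GDB | BP_MEM_WRITE, &wp);
 *
 * len must be a power of two and the address aligned to it, per the
 * sanity checks above.
 */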
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
            && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(env, wp);
        }
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
    }

    breakpoint_invalidate(env, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints.  */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(env, bp);
        }
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(env, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

void cpu_reset_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUArchState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    /* The memcpy above copied env's list heads into new_env; re-initialize
       the new CPU's lists before cloning.  (Initializing env's own lists
       here would empty them and make the loops below no-ops.) */
    QTAILQ_INIT(&new_env->breakpoints);
    QTAILQ_INIT(&new_env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
        != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0) {
        return;
    }
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

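/*
 * Compute the TLB's iotlb value for a page.  For RAM it is the ram address
 * ORed with a dirty-tracking section index (notdirty or rom), so that
 * writes can be trapped; for MMIO it is the section index plus the offset
 * within the region.  Pages covered by a watchpoint are redirected to the
 * watch section instead.
 */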
hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, paddr);
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = section - phys_sections;
        iotlb += memory_region_section_addr(section, paddr);
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

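/*
 * Subpages handle memory regions that do not cover a whole target page:
 * each subpage_t fronts one page and maps every byte offset within it to a
 * section index, so dispatch falls through to the correct backing region.
 */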
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(hwaddr base);
static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(AddressSpaceDispatch *d)
{
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}

static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    hwaddr addr;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    addr = start_addr;
    phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}

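/*
 * Called for each section when an address space's topology changes: carve
 * off an unaligned head and tail as subpages, and register the page-aligned
 * middle in one go via register_multipage().
 */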
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
    MemoryRegionSection now = *section, remain = *section;

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(d, &now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    while (remain.size >= TARGET_PAGE_SIZE) {
        now = remain;
        if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = TARGET_PAGE_SIZE;
            register_subpage(d, &now);
        } else {
            now.size &= TARGET_PAGE_MASK;
            register_multipage(d, &now);
        }
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(d, &now);
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC) {
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
    }

    return fs.f_bsize;
}

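/*
 * Back a RAM block with hugepages: create and immediately unlink a
 * temporary file under the -mem-path directory, size it with ftruncate()
 * (best effort; older hugetlbfs lacked it), and mmap() it.  Returns NULL
 * on failure so the caller can fall back to a normal allocation.
 */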
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory)) {
        perror("ftruncate");
    }

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif

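/*
 * Pick an offset in the ram_addr_t space for a new block: scan the gaps
 * between existing blocks and take the smallest one that fits, which keeps
 * the address space compact.
 */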
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QTAILQ_EMPTY(&ram_list.blocks)) {
        return 0;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        last = MAX(last, block->offset + block->length);
    }

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
        }
    }
}

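/*
 * Set a RAM block's identification string, "<device path>/<name>" when a
 * device is given (e.g. something like "0000:00:02.0/vga.vram"; the exact
 * path shown here is hypothetical).  The idstr must be unique, since
 * migration matches RAM blocks by it, so duplicates abort.
 */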
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

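/*
 * Allocate and register a new RAM block.  Backing storage comes, in this
 * order: a caller-supplied host pointer, a hugetlbfs file when -mem-path
 * is set (falling back to qemu_vmalloc), a Xen or s390/KVM specific
 * allocator, or plain qemu_vmalloc().  The block list is kept sorted from
 * biggest to smallest, so lookups tend to hit the large, busy blocks first.
 */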
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_vmalloc(size);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    ram_list.version++;

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);

    if (kvm_enabled()) {
        kvm_setup_guest_memory(new_block->host, size);
    }

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            g_free(block);
            return;
        }
    }
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            return;
        }
    }
}

#ifndef _WIN32
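/* Remap part of a RAM block in place with MAP_FIXED, recreating the old
   mapping's flags and backing file.  Used, for instance, to replace a page
   the host has poisoned after a hardware memory error. */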
1137void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1138{
1139 RAMBlock *block;
1140 ram_addr_t offset;
1141 int flags;
1142 void *area, *vaddr;
1143
Paolo Bonzinia3161032012-11-14 15:54:48 +01001144 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001145 offset = addr - block->offset;
1146 if (offset < block->length) {
1147 vaddr = block->host + offset;
1148 if (block->flags & RAM_PREALLOC_MASK) {
1149 ;
1150 } else {
1151 flags = MAP_FIXED;
1152 munmap(vaddr, length);
1153 if (mem_path) {
1154#if defined(__linux__) && !defined(TARGET_S390X)
1155 if (block->fd) {
1156#ifdef MAP_POPULATE
1157 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1158 MAP_PRIVATE;
1159#else
1160 flags |= MAP_PRIVATE;
1161#endif
1162 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1163 flags, block->fd, offset);
1164 } else {
1165 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1166 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1167 flags, -1, 0);
1168 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01001169#else
1170 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001171#endif
1172 } else {
1173#if defined(TARGET_S390X) && defined(CONFIG_KVM)
1174 flags |= MAP_SHARED | MAP_ANONYMOUS;
1175 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
1176 flags, -1, 0);
1177#else
1178 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1179 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1180 flags, -1, 0);
1181#endif
1182 }
1183 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001184 fprintf(stderr, "Could not remap addr: "
1185 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001186 length, addr);
1187 exit(1);
1188 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001189 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001190 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001191 }
1192 return;
1193 }
1194 }
1195}
1196#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return block->host + (addr - block->offset);
}
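
/* Illustrative sketch (not in the original file): per the comment above,
 * a device may use qemu_get_ram_ptr() only on memory it owns end to end,
 * e.g. its own video ram.  "example_vram_fill" and its parameters are
 * hypothetical.
 */
static void example_vram_fill(ram_addr_t vram_base, size_t vram_size,
                              uint8_t pattern)
{
    /* Legitimate only because the device owns the whole block;
     * never use this pattern for general purpose DMA. */
    uint8_t *host = qemu_get_ram_ptr(vram_base);

    memset(host, pattern, vram_size);
}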

/* Return a host pointer to ram allocated with qemu_ram_alloc.  Same as
 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
 *
 * ??? Is this still necessary?
 */
static void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}
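
/* Illustrative sketch (not in the original file): qemu_ram_ptr_length()
 * clamps *size to what remains of the RAM block, so a caller must use
 * the value written back rather than the length it asked for.  The
 * helper name below is hypothetical.
 */
static void example_read_clamped(ram_addr_t addr, uint8_t *dst,
                                 ram_addr_t want)
{
    ram_addr_t got = want;
    void *host = qemu_ram_ptr_length(addr, &got);

    /* 'got' may now be smaller than 'want' near the end of the block. */
    if (host) {
        memcpy(dst, host, got);
    }
}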

void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}
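
/* Illustrative sketch (not in the original file): the two translations
 * above are inverses of each other for any mapped guest RAM offset, a
 * property this hypothetical self-check exercises.
 */
static void example_check_round_trip(ram_addr_t addr)
{
    void *host = qemu_get_ram_ptr(addr);

    /* Translating the host pointer back must yield the ram offset we
     * started from. */
    assert(qemu_ram_addr_from_host_nofail(host) == addr);
}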

static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
#endif
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
#endif
}

static const MemoryRegionOps unassigned_mem_ops = {
    .read = unassigned_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t error_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    abort();
}

static void error_mem_write(void *opaque, hwaddr addr,
                            uint64_t value, unsigned size)
{
    abort();
}

static const MemoryRegionOps error_mem_ops = {
    .read = error_mem_read,
    .write = error_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps rom_mem_ops = {
    .read = error_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static const MemoryRegionOps notdirty_mem_ops = {
    .read = error_mem_read,
    .write = notdirty_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = cpu_single_env;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb_check_watchpoint(env);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
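
/* Illustrative sketch (not in the original file): watchpoints that feed
 * the check above are installed with cpu_watchpoint_insert(), declared
 * elsewhere in QEMU; the wrapper name here is hypothetical.
 */
static int example_watch_guest_word(CPUArchState *env, target_ulong vaddr)
{
    /* Trap any write to a 4-byte guest virtual region.  The hit is
     * serviced by check_watchpoint() via the watch_mem ops below. */
    return cpu_watchpoint_insert(env, vaddr, 4, BP_MEM_WRITE, NULL);
}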

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_read(void *opaque, hwaddr addr,
                             unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    return io_mem_read(section->mr, addr, len);
}

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " idx %d value %"PRIx64"\n",
           __func__, mmio, len, addr, idx, value);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    io_mem_write(section->mr, addr, value, len);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
                                 unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return ldub_p(ptr);
    case 2: return lduw_p(ptr);
    case 4: return ldl_p(ptr);
    default: abort();
    }
}

static void subpage_ram_write(void *opaque, hwaddr addr,
                              uint64_t value, unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return stb_p(ptr, value);
    case 2: return stw_p(ptr, value);
    case 4: return stl_p(ptr, value);
    default: abort();
    }
}

static const MemoryRegionOps subpage_ram_ops = {
    .read = subpage_ram_read,
    .write = subpage_ram_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    if (memory_region_is_ram(phys_sections[section].mr)) {
        MemoryRegionSection new_section = phys_sections[section];
        new_section.mr = &io_mem_subpage_ram;
        section = phys_section_add(&new_section);
    }
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->base = base;
    memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);

    return mmio;
}
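
/* Illustrative sketch (not in the original file): how a sub-page comes
 * together.  A page whose first half is RAM-backed and whose second half
 * is MMIO would be split by registering each range with its own section
 * index; the section indices here are hypothetical.
 */
static subpage_t *example_split_page(hwaddr base, uint16_t ram_section,
                                     uint16_t mmio_section)
{
    subpage_t *sp = subpage_init(base);

    subpage_register(sp, 0, TARGET_PAGE_SIZE / 2 - 1, ram_section);
    subpage_register(sp, TARGET_PAGE_SIZE / 2, TARGET_PAGE_SIZE - 1,
                     mmio_section);
    return sp;
}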

static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = UINT64_MAX,
    };

    return phys_section_add(&section);
}

MemoryRegion *iotlb_to_region(hwaddr index)
{
    return phys_sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
    memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
                          "subpage-ram", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}

static void mem_begin(MemoryListener *listener)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);

    destroy_all_mappings(d);
    d->phys_map.ptr = PHYS_MAP_NODE_NIL;
}

static void core_begin(MemoryListener *listener)
{
    phys_sections_clear();
    phys_section_unassigned = dummy_section(&io_mem_unassigned);
    phys_section_notdirty = dummy_section(&io_mem_notdirty);
    phys_section_rom = dummy_section(&io_mem_rom);
    phys_section_watch = dummy_section(&io_mem_watch);
}

static void tcg_commit(MemoryListener *listener)
{
    CPUArchState *env;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}

static void io_region_add(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);

    mrio->mr = section->mr;
    mrio->offset = section->offset_within_region;
    iorange_init(&mrio->iorange, &memory_region_iorange_ops,
                 section->offset_within_address_space, section->size);
    ioport_register(&mrio->iorange);
}

static void io_region_del(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    isa_unassign_ioport(section->offset_within_address_space, section->size);
}

static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .priority = 1,
};

static MemoryListener io_memory_listener = {
    .region_add = io_region_add,
    .region_del = io_region_del,
    .priority = 0,
};

static MemoryListener tcg_memory_listener = {
    .commit = tcg_commit,
};

void address_space_init_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
    d->listener = (MemoryListener) {
        .begin = mem_begin,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    as->dispatch = d;
    memory_listener_register(&d->listener, as);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    memory_listener_unregister(&d->listener);
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    g_free(d);
    as->dispatch = NULL;
}

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    address_space_init(&address_space_memory, system_memory);
    address_space_memory.name = "memory";

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    address_space_init(&address_space_io, system_io);
    address_space_io.name = "I/O";

    memory_listener_register(&core_memory_listener, &address_space_memory);
    memory_listener_register(&io_memory_listener, &address_space_io);
    memory_listener_register(&tcg_memory_listener, &address_space_memory);

    dma_context_init(&dma_context_memory, &address_space_memory,
                     NULL, NULL, NULL);
}
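
/* Illustrative sketch (not in the original file): once memory_map_init()
 * has built the system address space, board code populates it with
 * regions.  A minimal RAM setup under this era's memory API might look
 * like the following; the region variable and size are hypothetical.
 */
static void example_board_ram_init(void)
{
    static MemoryRegion board_ram;

    memory_region_init_ram(&board_ram, "board.ram", 128 * 1024 * 1024);
    memory_region_add_subregion(get_system_memory(), 0, &board_ram);
}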

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else

static void invalidate_and_set_dirty(hwaddr addr,
                                     hwaddr length)
{
    if (!cpu_physical_memory_is_dirty(addr)) {
        /* invalidate code */
        tb_invalidate_phys_page_range(addr, addr + length, 0);
        /* set dirty bit */
        cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
    }
    xen_modified_memory(addr, length);
}
1844
Avi Kivitya8170e52012-10-23 12:30:10 +02001845void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02001846 int len, bool is_write)
bellard13eb76e2004-01-24 15:23:36 +00001847{
Avi Kivityac1970f2012-10-03 16:22:53 +02001848 AddressSpaceDispatch *d = as->dispatch;
Avi Kivity37ec01d2012-03-08 18:08:35 +02001849 int l;
bellard13eb76e2004-01-24 15:23:36 +00001850 uint8_t *ptr;
1851 uint32_t val;
Avi Kivitya8170e52012-10-23 12:30:10 +02001852 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02001853 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00001854
bellard13eb76e2004-01-24 15:23:36 +00001855 while (len > 0) {
1856 page = addr & TARGET_PAGE_MASK;
1857 l = (page + TARGET_PAGE_SIZE) - addr;
1858 if (l > len)
1859 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02001860 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00001861
bellard13eb76e2004-01-24 15:23:36 +00001862 if (is_write) {
Avi Kivityf3705d52012-03-08 16:16:34 +02001863 if (!memory_region_is_ram(section->mr)) {
Avi Kivitya8170e52012-10-23 12:30:10 +02001864 hwaddr addr1;
Blue Swirlcc5bea62012-04-14 14:56:48 +00001865 addr1 = memory_region_section_addr(section, addr);
bellard6a00d602005-11-21 23:25:50 +00001866 /* XXX: could force cpu_single_env to NULL to avoid
1867 potential bugs */
aurel326c2934d2009-02-18 21:37:17 +00001868 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00001869 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001870 val = ldl_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02001871 io_mem_write(section->mr, addr1, val, 4);
bellard13eb76e2004-01-24 15:23:36 +00001872 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00001873 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00001874 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001875 val = lduw_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02001876 io_mem_write(section->mr, addr1, val, 2);
bellard13eb76e2004-01-24 15:23:36 +00001877 l = 2;
1878 } else {
bellard1c213d12005-09-03 10:49:04 +00001879 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001880 val = ldub_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02001881 io_mem_write(section->mr, addr1, val, 1);
bellard13eb76e2004-01-24 15:23:36 +00001882 l = 1;
1883 }
Avi Kivityf3705d52012-03-08 16:16:34 +02001884 } else if (!section->readonly) {
Anthony PERARD8ca56922011-07-15 04:32:53 +00001885 ram_addr_t addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02001886 addr1 = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00001887 + memory_region_section_addr(section, addr);
bellard13eb76e2004-01-24 15:23:36 +00001888 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00001889 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00001890 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001891 invalidate_and_set_dirty(addr1, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01001892 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00001893 }
1894 } else {
Blue Swirlcc5bea62012-04-14 14:56:48 +00001895 if (!(memory_region_is_ram(section->mr) ||
1896 memory_region_is_romd(section->mr))) {
Avi Kivitya8170e52012-10-23 12:30:10 +02001897 hwaddr addr1;
bellard13eb76e2004-01-24 15:23:36 +00001898 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00001899 addr1 = memory_region_section_addr(section, addr);
aurel326c2934d2009-02-18 21:37:17 +00001900 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00001901 /* 32 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02001902 val = io_mem_read(section->mr, addr1, 4);
bellardc27004e2005-01-03 23:35:10 +00001903 stl_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00001904 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00001905 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00001906 /* 16 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02001907 val = io_mem_read(section->mr, addr1, 2);
bellardc27004e2005-01-03 23:35:10 +00001908 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00001909 l = 2;
1910 } else {
bellard1c213d12005-09-03 10:49:04 +00001911 /* 8 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02001912 val = io_mem_read(section->mr, addr1, 1);
bellardc27004e2005-01-03 23:35:10 +00001913 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00001914 l = 1;
1915 }
1916 } else {
1917 /* RAM case */
Anthony PERARD0a1b3572012-03-19 15:54:34 +00001918 ptr = qemu_get_ram_ptr(section->mr->ram_addr
Blue Swirlcc5bea62012-04-14 14:56:48 +00001919 + memory_region_section_addr(section,
1920 addr));
Avi Kivityf3705d52012-03-08 16:16:34 +02001921 memcpy(buf, ptr, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01001922 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00001923 }
1924 }
1925 len -= l;
1926 buf += l;
1927 addr += l;
1928 }
1929}

void address_space_write(AddressSpace *as, hwaddr addr,
                         const uint8_t *buf, int len)
{
    address_space_rw(as, addr, (uint8_t *)buf, len, true);
}

/**
 * address_space_read: read from an address space.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
{
    address_space_rw(as, addr, buf, len, false);
}
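
/* Illustrative sketch (not in the original file): reading a few bytes
 * of guest memory through the system address space.  The address is
 * hypothetical; the call goes through address_space_rw(), so it works
 * for both RAM and MMIO-backed pages.
 */
static void example_peek_guest_memory(hwaddr guest_addr)
{
    uint8_t buf[16];

    address_space_read(&address_space_memory, guest_addr, buf, sizeof(buf));
}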


void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    return address_space_rw(&address_space_memory, addr, buf, len, is_write);
}

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(hwaddr addr,
                                   const uint8_t *buf, int len)
{
    AddressSpaceDispatch *d = address_space_memory.dispatch;
    int l;
    uint8_t *ptr;
    hwaddr page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(d, page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) ||
              memory_region_is_romd(section->mr))) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = memory_region_get_ram_addr(section->mr)
                + memory_region_section_addr(section, addr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(addr1, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

typedef struct {
    void *buffer;
    hwaddr addr;
    hwaddr len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

static void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
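
/* Illustrative sketch (not in the original file): a device model whose
 * map attempt failed (NULL return because the bounce buffer was busy)
 * can park itself on the map-client list and retry from the callback.
 * "ExampleDev" and its routines are hypothetical.
 */
typedef struct ExampleDev ExampleDev;

static void example_dev_retry_dma(void *opaque)
{
    /* Called from cpu_notify_map_clients(); the client is unregistered
     * right after this callback returns, so no manual cleanup is
     * needed before re-issuing the deferred DMA. */
    ExampleDev *dev = opaque;
    (void)dev;
}

static void example_dev_defer_dma(ExampleDev *dev)
{
    cpu_register_map_client(dev, example_dev_retry_dma);
}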

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    AddressSpaceDispatch *d = as->dispatch;
    hwaddr len = *plen;
    hwaddr todo = 0;
    int l;
    hwaddr page;
    MemoryRegionSection *section;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(d, page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                address_space_read(as, addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = memory_region_get_ram_addr(section->mr)
                + memory_region_section_addr(section, addr);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                invalidate_and_set_dirty(addr1, l);
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}
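
/* Illustrative sketch (not in the original file): the canonical
 * map/modify/unmap pattern for zero-copy DMA, honouring the partial
 * mapping and NULL cases documented above.  The fill value is
 * hypothetical.
 */
static void example_dma_fill(hwaddr addr, hwaddr len)
{
    while (len > 0) {
        hwaddr plen = len;
        void *host = cpu_physical_memory_map(addr, &plen, 1 /* write */);

        if (!host) {
            /* Resources exhausted; a real caller would defer via
             * cpu_register_map_client() and retry later. */
            return;
        }
        memset(host, 0, plen);          /* plen may be less than len */
        cpu_physical_memory_unmap(host, plen, 1, plen);
        addr += plen;
        len -= plen;
    }
}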

/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint32_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
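
/* Illustrative sketch (not in the original file): a device whose
 * in-memory descriptors are defined as little-endian would use the
 * _le_ accessor so the result is host-order regardless of the target's
 * endianness.  The descriptor layout is hypothetical.
 */
static uint32_t example_read_le_descriptor_word(hwaddr desc_base)
{
    /* Byte-swapped on big-endian targets, a direct load otherwise. */
    return ldl_le_phys(desc_base);
}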

/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);

        /* XXX This is broken when device endian != cpu endian.
           Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = io_mem_read(section->mr, addr, 4) << 32;
        val |= io_mem_read(section->mr, addr + 4, 4);
#else
        val = io_mem_read(section->mr, addr, 4);
        val |= io_mem_read(section->mr, addr + 4, 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(hwaddr addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
                               & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
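
/* Illustrative sketch (not in the original file): per the comment above,
 * MMU emulation uses the _notdirty store when setting accessed/dirty
 * bits in a guest page-table entry, so the write itself does not flip
 * the page's own dirty bits.  The PTE bit layout below is hypothetical.
 */
static void example_set_pte_accessed(hwaddr pte_addr, uint32_t pte)
{
    stl_phys_notdirty(pte_addr, pte | 0x20 /* hypothetical accessed bit */);
}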

void stq_phys_notdirty(hwaddr addr, uint64_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(section->mr, addr, val >> 32, 4);
        io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
#else
        io_mem_write(section->mr, addr, (uint32_t)val, 4);
        io_mem_write(section->mr, addr + 4, val >> 32, 4);
#endif
    } else {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        stq_p(ptr, val);
    }
}
2388
bellard8df1cd02005-01-28 22:37:22 +00002389/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002390static inline void stl_phys_internal(hwaddr addr, uint32_t val,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002391 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002392{
bellard8df1cd02005-01-28 22:37:22 +00002393 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02002394 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00002395
Avi Kivityac1970f2012-10-03 16:22:53 +02002396 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002397
Avi Kivityf3705d52012-03-08 16:16:34 +02002398 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00002399 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02002400 if (memory_region_is_ram(section->mr)) {
2401 section = &phys_sections[phys_section_rom];
2402 }
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002403#if defined(TARGET_WORDS_BIGENDIAN)
2404 if (endian == DEVICE_LITTLE_ENDIAN) {
2405 val = bswap32(val);
2406 }
2407#else
2408 if (endian == DEVICE_BIG_ENDIAN) {
2409 val = bswap32(val);
2410 }
2411#endif
Avi Kivity37ec01d2012-03-08 18:08:35 +02002412 io_mem_write(section->mr, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00002413 } else {
2414 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02002415 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002416 + memory_region_section_addr(section, addr);
bellard8df1cd02005-01-28 22:37:22 +00002417 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002418 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002419 switch (endian) {
2420 case DEVICE_LITTLE_ENDIAN:
2421 stl_le_p(ptr, val);
2422 break;
2423 case DEVICE_BIG_ENDIAN:
2424 stl_be_p(ptr, val);
2425 break;
2426 default:
2427 stl_p(ptr, val);
2428 break;
2429 }
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002430 invalidate_and_set_dirty(addr1, 4);
bellard8df1cd02005-01-28 22:37:22 +00002431 }
2432}
2433
Avi Kivitya8170e52012-10-23 12:30:10 +02002434void stl_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002435{
2436 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2437}
2438
Avi Kivitya8170e52012-10-23 12:30:10 +02002439void stl_le_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002440{
2441 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2442}
2443
Avi Kivitya8170e52012-10-23 12:30:10 +02002444void stl_be_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002445{
2446 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2447}
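
/* Illustrative sketch (not built): the _le/_be wrappers exist so that
 * code writing guest-visible structures with a fixed byte order does
 * not depend on the target's endianness.  The register offsets below
 * are made up for the example; only the calling pattern matters.
 */
#if 0
#define DEMO_REG_CTRL   0x00    /* hypothetical little-endian register */
#define DEMO_REG_STATUS 0x04

static void demo_device_init_regs(hwaddr mmio_base)
{
    /* the swap to little-endian happens inside stl_le_phys */
    stl_le_phys(mmio_base + DEMO_REG_CTRL, 0);
    stl_le_phys(mmio_base + DEMO_REG_STATUS, 1);
}
#endif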

/* XXX: optimize */
void stb_phys(hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
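
/* Illustrative sketch (not built): a DMA-style write of a 64-bit
 * little-endian descriptor field.  cpu_to_le64 inside stq_le_phys does
 * the swap, so the caller passes plain host values.  The descriptor
 * layout (64-bit guest address followed by a 32-bit length) is
 * hypothetical.
 */
#if 0
static void write_ring_descriptor(hwaddr desc_addr, uint64_t buf_gpa,
                                  uint32_t buf_len)
{
    stq_le_phys(desc_addr, buf_gpa);        /* 64-bit buffer address */
    stl_le_phys(desc_addr + 8, buf_len);    /* 32-bit length field */
}
#endif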

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page is mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        } else {
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
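
/* Illustrative sketch (not built): a gdbstub-like caller reads guest
 * virtual memory by letting cpu_memory_rw_debug do the per-page
 * virtual-to-physical translation and the page-crossing split.  The
 * helper name is hypothetical; it returns 0 on success and -1 if any
 * page in the range is unmapped.
 */
#if 0
static int debug_read_guest_buf(CPUArchState *env, target_ulong vaddr,
                                uint8_t *out, int len)
{
    return cpu_memory_rw_debug(env, vaddr, out, len, 0 /* is_write */);
}
#endif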
#endif

#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find
 * out if it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
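
/* Illustrative sketch (not built): virtio fields are kept in guest
 * byte order, so a caller would swap only when host and target order
 * disagree.  This is an assumed shape of such a caller, not the actual
 * virtio code; the function name is hypothetical.
 */
#if 0
static uint16_t virtio_tswap16_demo(uint16_t v)
{
#ifdef HOST_WORDS_BIGENDIAN
    return virtio_is_big_endian() ? v : bswap16(v);
#else
    return virtio_is_big_endian() ? bswap16(v) : v;
#endif
}
#endif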

#endif

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             phys_addr >> TARGET_PAGE_BITS);

    return !(memory_region_is_ram(section->mr) ||
             memory_region_is_romd(section->mr));
}
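
/* Illustrative sketch (not built): memory-dump code can consult
 * cpu_physical_memory_is_io to skip device regions, where a read would
 * trigger side effects instead of returning stable contents.  The
 * helper name is hypothetical.
 */
#if 0
static void dump_page_if_ram(hwaddr paddr, uint8_t *buf)
{
    if (cpu_physical_memory_is_io(paddr)) {
        memset(buf, 0, TARGET_PAGE_SIZE);   /* don't touch device registers */
        return;
    }
    cpu_physical_memory_rw(paddr, buf, TARGET_PAGE_SIZE, 0);
}
#endif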
#endif