blob: 917bec0ecd02442a1771c0fe63be5349acc627d1 [file] [log] [blame]
bellard54936002003-05-13 00:25:15 +00001/*
Blue Swirl5b6dd862012-12-02 16:04:43 +00002 * Virtual page mapping
ths5fafdf22007-09-16 21:08:06 +00003 *
bellard54936002003-05-13 00:25:15 +00004 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
Blue Swirl8167ee82009-07-16 20:47:01 +000017 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
bellard54936002003-05-13 00:25:15 +000018 */
bellard67b915a2004-03-31 23:37:16 +000019#include "config.h"
bellardd5a8f072004-09-29 21:15:28 +000020#ifdef _WIN32
21#include <windows.h>
22#else
bellarda98d49b2004-11-14 16:22:05 +000023#include <sys/types.h>
bellardd5a8f072004-09-29 21:15:28 +000024#include <sys/mman.h>
25#endif
bellard54936002003-05-13 00:25:15 +000026
Stefan Weil055403b2010-10-22 23:03:32 +020027#include "qemu-common.h"
bellard6180a182003-09-30 21:04:53 +000028#include "cpu.h"
bellardb67d9a52008-05-23 09:57:34 +000029#include "tcg.h"
pbrookb3c77242008-06-30 16:31:04 +000030#include "hw/hw.h"
Alex Williamsoncc9e98c2010-06-25 11:09:43 -060031#include "hw/qdev.h"
Paolo Bonzini1de7afc2012-12-17 18:20:00 +010032#include "qemu/osdep.h"
aliguori7ba1e612008-11-05 16:04:33 +000033#include "kvm.h"
Jun Nakajima432d2682010-08-31 16:41:25 +010034#include "hw/xen.h"
Paolo Bonzini1de7afc2012-12-17 18:20:00 +010035#include "qemu/timer.h"
36#include "qemu/config-file.h"
Paolo Bonzini022c62c2012-12-17 18:19:49 +010037#include "exec/memory.h"
Peter Maydell9e119082012-10-29 11:34:32 +100038#include "dma.h"
Paolo Bonzini022c62c2012-12-17 18:19:49 +010039#include "exec/address-spaces.h"
pbrook53a59602006-03-25 19:31:22 +000040#if defined(CONFIG_USER_ONLY)
41#include <qemu.h>
Jun Nakajima432d2682010-08-31 16:41:25 +010042#else /* !CONFIG_USER_ONLY */
43#include "xen-mapcache.h"
Stefano Stabellini6506e4f2011-05-19 18:35:44 +010044#include "trace.h"
pbrook53a59602006-03-25 19:31:22 +000045#endif
bellard54936002003-05-13 00:25:15 +000046
Paolo Bonzini022c62c2012-12-17 18:19:49 +010047#include "exec/cputlb.h"
Blue Swirl5b6dd862012-12-02 16:04:43 +000048#include "translate-all.h"
Blue Swirl0cac1b62012-04-09 16:50:52 +000049
Paolo Bonzini022c62c2012-12-17 18:19:49 +010050#include "exec/memory-internal.h"
Avi Kivity67d95c12011-12-15 15:25:22 +020051
pbrook67d3b952006-12-18 05:03:52 +000052//#define DEBUG_UNASSIGNED
blueswir1db7b5422007-05-26 17:36:03 +000053//#define DEBUG_SUBPAGE
ths1196be32007-03-17 15:17:58 +000054
pbrook99773bd2006-04-16 15:14:59 +000055#if !defined(CONFIG_USER_ONLY)
bellard9fa3e852004-01-04 18:06:42 +000056int phys_ram_fd;
aliguori74576192008-10-06 14:02:03 +000057static int in_migration;
pbrook94a6b542009-04-11 17:15:54 +000058
Paolo Bonzini85d59fe2011-08-12 13:18:14 +020059RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
Avi Kivity62152b82011-07-26 14:26:14 +030060
61static MemoryRegion *system_memory;
Avi Kivity309cb472011-08-08 16:09:03 +030062static MemoryRegion *system_io;
Avi Kivity62152b82011-07-26 14:26:14 +030063
Avi Kivityf6790af2012-10-02 20:13:51 +020064AddressSpace address_space_io;
65AddressSpace address_space_memory;
Peter Maydell9e119082012-10-29 11:34:32 +100066DMAContext dma_context_memory;
Avi Kivity2673a5d2012-10-02 18:49:28 +020067
Avi Kivity0e0df1e2012-01-02 00:32:15 +020068MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
Avi Kivityde712f92012-01-02 12:41:07 +020069static MemoryRegion io_mem_subpage_ram;
Avi Kivity0e0df1e2012-01-02 00:32:15 +020070
pbrooke2eef172008-06-08 01:09:01 +000071#endif
bellard9fa3e852004-01-04 18:06:42 +000072
Andreas Färber9349b4f2012-03-14 01:38:32 +010073CPUArchState *first_cpu;
bellard6a00d602005-11-21 23:25:50 +000074/* current CPU in the current thread. It is only valid inside
75 cpu_exec() */
Andreas Färber9349b4f2012-03-14 01:38:32 +010076DEFINE_TLS(CPUArchState *,cpu_single_env);
pbrook2e70f6e2008-06-29 01:03:05 +000077/* 0 = Do not count executed instructions.
thsbf20dc02008-06-30 17:22:19 +000078 1 = Precise instruction counting.
pbrook2e70f6e2008-06-29 01:03:05 +000079 2 = Adaptive rate instruction counting. */
80int use_icount = 0;
bellard6a00d602005-11-21 23:25:50 +000081
pbrooke2eef172008-06-08 01:09:01 +000082#if !defined(CONFIG_USER_ONLY)
Avi Kivity4346ae32012-02-10 17:00:01 +020083
Avi Kivity5312bd82012-02-12 18:32:55 +020084static MemoryRegionSection *phys_sections;
85static unsigned phys_sections_nb, phys_sections_nb_alloc;
86static uint16_t phys_section_unassigned;
Avi Kivityaa102232012-03-08 17:06:55 +020087static uint16_t phys_section_notdirty;
88static uint16_t phys_section_rom;
89static uint16_t phys_section_watch;
Avi Kivity5312bd82012-02-12 18:32:55 +020090
Avi Kivityd6f2ea22012-02-12 20:12:49 +020091/* Simple allocator for PhysPageEntry nodes */
92static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
93static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;
94
Avi Kivity07f07b32012-02-13 20:45:32 +020095#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
Avi Kivityd6f2ea22012-02-12 20:12:49 +020096
pbrooke2eef172008-06-08 01:09:01 +000097static void io_mem_init(void);
Avi Kivity62152b82011-07-26 14:26:14 +030098static void memory_map_init(void);
Blue Swirl8b9c99d2012-10-28 11:04:51 +000099static void *qemu_safe_ram_ptr(ram_addr_t addr);
pbrooke2eef172008-06-08 01:09:01 +0000100
Avi Kivity1ec9b902012-01-02 12:47:48 +0200101static MemoryRegion io_mem_watch;
pbrook6658ffb2007-03-16 23:58:11 +0000102#endif
bellard54936002003-05-13 00:25:15 +0000103
Paul Brook6d9a1302010-02-28 23:55:53 +0000104#if !defined(CONFIG_USER_ONLY)
Avi Kivityd6f2ea22012-02-12 20:12:49 +0200105
Avi Kivityf7bf5462012-02-13 20:12:05 +0200106static void phys_map_node_reserve(unsigned nodes)
107{
108 if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
109 typedef PhysPageEntry Node[L2_SIZE];
110 phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
111 phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
112 phys_map_nodes_nb + nodes);
113 phys_map_nodes = g_renew(Node, phys_map_nodes,
114 phys_map_nodes_nb_alloc);
115 }
116}
117
118static uint16_t phys_map_node_alloc(void)
Avi Kivityd6f2ea22012-02-12 20:12:49 +0200119{
120 unsigned i;
121 uint16_t ret;
122
Avi Kivityf7bf5462012-02-13 20:12:05 +0200123 ret = phys_map_nodes_nb++;
Avi Kivityd6f2ea22012-02-12 20:12:49 +0200124 assert(ret != PHYS_MAP_NODE_NIL);
Avi Kivityf7bf5462012-02-13 20:12:05 +0200125 assert(ret != phys_map_nodes_nb_alloc);
Avi Kivityd6f2ea22012-02-12 20:12:49 +0200126 for (i = 0; i < L2_SIZE; ++i) {
Avi Kivity07f07b32012-02-13 20:45:32 +0200127 phys_map_nodes[ret][i].is_leaf = 0;
Avi Kivityc19e8802012-02-13 20:25:31 +0200128 phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
Avi Kivityd6f2ea22012-02-12 20:12:49 +0200129 }
Avi Kivityf7bf5462012-02-13 20:12:05 +0200130 return ret;
Avi Kivityd6f2ea22012-02-12 20:12:49 +0200131}
132
133static void phys_map_nodes_reset(void)
134{
135 phys_map_nodes_nb = 0;
136}
137
Avi Kivityf7bf5462012-02-13 20:12:05 +0200138
Avi Kivitya8170e52012-10-23 12:30:10 +0200139static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
140 hwaddr *nb, uint16_t leaf,
Avi Kivity29990972012-02-13 20:21:20 +0200141 int level)
Avi Kivityf7bf5462012-02-13 20:12:05 +0200142{
143 PhysPageEntry *p;
144 int i;
Avi Kivitya8170e52012-10-23 12:30:10 +0200145 hwaddr step = (hwaddr)1 << (level * L2_BITS);
Avi Kivityf7bf5462012-02-13 20:12:05 +0200146
Avi Kivity07f07b32012-02-13 20:45:32 +0200147 if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
Avi Kivityc19e8802012-02-13 20:25:31 +0200148 lp->ptr = phys_map_node_alloc();
149 p = phys_map_nodes[lp->ptr];
Avi Kivityf7bf5462012-02-13 20:12:05 +0200150 if (level == 0) {
151 for (i = 0; i < L2_SIZE; i++) {
Avi Kivity07f07b32012-02-13 20:45:32 +0200152 p[i].is_leaf = 1;
Avi Kivityc19e8802012-02-13 20:25:31 +0200153 p[i].ptr = phys_section_unassigned;
Avi Kivityf7bf5462012-02-13 20:12:05 +0200154 }
155 }
156 } else {
Avi Kivityc19e8802012-02-13 20:25:31 +0200157 p = phys_map_nodes[lp->ptr];
Avi Kivityf7bf5462012-02-13 20:12:05 +0200158 }
Avi Kivity29990972012-02-13 20:21:20 +0200159 lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];
Avi Kivityf7bf5462012-02-13 20:12:05 +0200160
Avi Kivity29990972012-02-13 20:21:20 +0200161 while (*nb && lp < &p[L2_SIZE]) {
Avi Kivity07f07b32012-02-13 20:45:32 +0200162 if ((*index & (step - 1)) == 0 && *nb >= step) {
163 lp->is_leaf = true;
Avi Kivityc19e8802012-02-13 20:25:31 +0200164 lp->ptr = leaf;
Avi Kivity07f07b32012-02-13 20:45:32 +0200165 *index += step;
166 *nb -= step;
Avi Kivity29990972012-02-13 20:21:20 +0200167 } else {
168 phys_page_set_level(lp, index, nb, leaf, level - 1);
169 }
170 ++lp;
Avi Kivityf7bf5462012-02-13 20:12:05 +0200171 }
172}
173
Avi Kivityac1970f2012-10-03 16:22:53 +0200174static void phys_page_set(AddressSpaceDispatch *d,
Avi Kivitya8170e52012-10-23 12:30:10 +0200175 hwaddr index, hwaddr nb,
Avi Kivity29990972012-02-13 20:21:20 +0200176 uint16_t leaf)
bellard92e873b2004-05-21 14:52:29 +0000177{
Avi Kivity29990972012-02-13 20:21:20 +0200178 /* Wildly overreserve - it doesn't matter much. */
Avi Kivity07f07b32012-02-13 20:45:32 +0200179 phys_map_node_reserve(3 * P_L2_LEVELS);
bellard92e873b2004-05-21 14:52:29 +0000180
Avi Kivityac1970f2012-10-03 16:22:53 +0200181 phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
bellard92e873b2004-05-21 14:52:29 +0000182}
183
Avi Kivitya8170e52012-10-23 12:30:10 +0200184MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
bellard92e873b2004-05-21 14:52:29 +0000185{
Avi Kivityac1970f2012-10-03 16:22:53 +0200186 PhysPageEntry lp = d->phys_map;
Avi Kivity31ab2b42012-02-13 16:44:19 +0200187 PhysPageEntry *p;
188 int i;
Avi Kivity31ab2b42012-02-13 16:44:19 +0200189 uint16_t s_index = phys_section_unassigned;
Avi Kivityf1f6e3b2011-11-20 17:52:22 +0200190
Avi Kivity07f07b32012-02-13 20:45:32 +0200191 for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
Avi Kivityc19e8802012-02-13 20:25:31 +0200192 if (lp.ptr == PHYS_MAP_NODE_NIL) {
Avi Kivity31ab2b42012-02-13 16:44:19 +0200193 goto not_found;
194 }
Avi Kivityc19e8802012-02-13 20:25:31 +0200195 p = phys_map_nodes[lp.ptr];
Avi Kivity31ab2b42012-02-13 16:44:19 +0200196 lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
Avi Kivityf1f6e3b2011-11-20 17:52:22 +0200197 }
Avi Kivity31ab2b42012-02-13 16:44:19 +0200198
Avi Kivityc19e8802012-02-13 20:25:31 +0200199 s_index = lp.ptr;
Avi Kivity31ab2b42012-02-13 16:44:19 +0200200not_found:
Avi Kivityf3705d52012-03-08 16:16:34 +0200201 return &phys_sections[s_index];
202}
203
Blue Swirle5548612012-04-21 13:08:33 +0000204bool memory_region_is_unassigned(MemoryRegion *mr)
205{
206 return mr != &io_mem_ram && mr != &io_mem_rom
207 && mr != &io_mem_notdirty && !mr->rom_device
208 && mr != &io_mem_watch;
209}
bellard9fa3e852004-01-04 18:06:42 +0000210#endif
bellardfd6ce8f2003-05-14 19:00:11 +0000211
/* One-time global init for the execution machinery.  In system-mode builds
 * this brings up the memory topology and the core I/O memory regions;
 * user-mode builds need neither. */
void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}
219
pbrook9656f322008-07-01 20:01:19 +0000220#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
221
Juan Quintelae59fb372009-09-29 22:48:21 +0200222static int cpu_common_post_load(void *opaque, int version_id)
Juan Quintelae7f4eff2009-09-10 03:04:33 +0200223{
Andreas Färber9349b4f2012-03-14 01:38:32 +0100224 CPUArchState *env = opaque;
Juan Quintelae7f4eff2009-09-10 03:04:33 +0200225
aurel323098dba2009-03-07 21:28:24 +0000226 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
227 version_id is increased. */
228 env->interrupt_request &= ~0x01;
pbrook9656f322008-07-01 20:01:19 +0000229 tlb_flush(env, 1);
230
231 return 0;
232}
Juan Quintelae7f4eff2009-09-10 03:04:33 +0200233
/* Migration layout for state shared by every CPU architecture.  Only
 * "halted" and "interrupt_request" travel in this section; all other CPU
 * state is handled by the per-target cpu_save/cpu_load pair registered in
 * cpu_exec_init(). */
static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,  /* clears legacy CPU_INTERRUPT_EXIT bit */
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
pbrook9656f322008-07-01 20:01:19 +0000246#endif
247
Andreas Färber9349b4f2012-03-14 01:38:32 +0100248CPUArchState *qemu_get_cpu(int cpu)
Glauber Costa950f1472009-06-09 12:15:18 -0400249{
Andreas Färber9349b4f2012-03-14 01:38:32 +0100250 CPUArchState *env = first_cpu;
Glauber Costa950f1472009-06-09 12:15:18 -0400251
252 while (env) {
253 if (env->cpu_index == cpu)
254 break;
255 env = env->next_cpu;
256 }
257
258 return env;
259}
260
Andreas Färber9349b4f2012-03-14 01:38:32 +0100261void cpu_exec_init(CPUArchState *env)
bellardfd6ce8f2003-05-14 19:00:11 +0000262{
Andreas Färber9f09e182012-05-03 06:59:07 +0200263#ifndef CONFIG_USER_ONLY
264 CPUState *cpu = ENV_GET_CPU(env);
265#endif
Andreas Färber9349b4f2012-03-14 01:38:32 +0100266 CPUArchState **penv;
bellard6a00d602005-11-21 23:25:50 +0000267 int cpu_index;
268
pbrookc2764712009-03-07 15:24:59 +0000269#if defined(CONFIG_USER_ONLY)
270 cpu_list_lock();
271#endif
bellard6a00d602005-11-21 23:25:50 +0000272 env->next_cpu = NULL;
273 penv = &first_cpu;
274 cpu_index = 0;
275 while (*penv != NULL) {
Nathan Froyd1e9fa732009-06-03 11:33:08 -0700276 penv = &(*penv)->next_cpu;
bellard6a00d602005-11-21 23:25:50 +0000277 cpu_index++;
278 }
279 env->cpu_index = cpu_index;
aliguori268a3622009-04-21 22:30:27 +0000280 env->numa_node = 0;
Blue Swirl72cf2d42009-09-12 07:36:22 +0000281 QTAILQ_INIT(&env->breakpoints);
282 QTAILQ_INIT(&env->watchpoints);
Jan Kiszkadc7a09c2011-03-15 12:26:31 +0100283#ifndef CONFIG_USER_ONLY
Andreas Färber9f09e182012-05-03 06:59:07 +0200284 cpu->thread_id = qemu_get_thread_id();
Jan Kiszkadc7a09c2011-03-15 12:26:31 +0100285#endif
bellard6a00d602005-11-21 23:25:50 +0000286 *penv = env;
pbrookc2764712009-03-07 15:24:59 +0000287#if defined(CONFIG_USER_ONLY)
288 cpu_list_unlock();
289#endif
pbrookb3c77242008-06-30 16:31:04 +0000290#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
Alex Williamson0be71e32010-06-25 11:09:07 -0600291 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
292 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
pbrookb3c77242008-06-30 16:31:04 +0000293 cpu_save, cpu_load, env);
294#endif
bellardfd6ce8f2003-05-14 19:00:11 +0000295}
296
#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
/* Drop translated code containing @pc so the new breakpoint takes effect.
 * In user mode guest virtual addresses map directly onto host pages. */
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
/* System mode: translate @pc through the MMU (debug path) first, keeping
 * the in-page offset, then invalidate at the physical address. */
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
                            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */
bellardd720b932004-04-25 17:57:43 +0000311
Paul Brookc527ee82010-03-01 03:31:14 +0000312#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +0100313void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
Paul Brookc527ee82010-03-01 03:31:14 +0000314
315{
316}
317
Andreas Färber9349b4f2012-03-14 01:38:32 +0100318int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
Paul Brookc527ee82010-03-01 03:31:14 +0000319 int flags, CPUWatchpoint **watchpoint)
320{
321 return -ENOSYS;
322}
323#else
pbrook6658ffb2007-03-16 23:58:11 +0000324/* Add a watchpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +0100325int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
aliguoria1d1bb32008-11-18 20:07:32 +0000326 int flags, CPUWatchpoint **watchpoint)
pbrook6658ffb2007-03-16 23:58:11 +0000327{
aliguorib4051332008-11-18 20:14:20 +0000328 target_ulong len_mask = ~(len - 1);
aliguoric0ce9982008-11-25 22:13:57 +0000329 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +0000330
aliguorib4051332008-11-18 20:14:20 +0000331 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
Max Filippov0dc23822012-01-29 03:15:23 +0400332 if ((len & (len - 1)) || (addr & ~len_mask) ||
333 len == 0 || len > TARGET_PAGE_SIZE) {
aliguorib4051332008-11-18 20:14:20 +0000334 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
335 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
336 return -EINVAL;
337 }
Anthony Liguori7267c092011-08-20 22:09:37 -0500338 wp = g_malloc(sizeof(*wp));
pbrook6658ffb2007-03-16 23:58:11 +0000339
aliguoria1d1bb32008-11-18 20:07:32 +0000340 wp->vaddr = addr;
aliguorib4051332008-11-18 20:14:20 +0000341 wp->len_mask = len_mask;
aliguoria1d1bb32008-11-18 20:07:32 +0000342 wp->flags = flags;
343
aliguori2dc9f412008-11-18 20:56:59 +0000344 /* keep all GDB-injected watchpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +0000345 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +0000346 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
aliguoric0ce9982008-11-25 22:13:57 +0000347 else
Blue Swirl72cf2d42009-09-12 07:36:22 +0000348 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +0000349
pbrook6658ffb2007-03-16 23:58:11 +0000350 tlb_flush_page(env, addr);
aliguoria1d1bb32008-11-18 20:07:32 +0000351
352 if (watchpoint)
353 *watchpoint = wp;
354 return 0;
pbrook6658ffb2007-03-16 23:58:11 +0000355}
356
aliguoria1d1bb32008-11-18 20:07:32 +0000357/* Remove a specific watchpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +0100358int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
aliguoria1d1bb32008-11-18 20:07:32 +0000359 int flags)
pbrook6658ffb2007-03-16 23:58:11 +0000360{
aliguorib4051332008-11-18 20:14:20 +0000361 target_ulong len_mask = ~(len - 1);
aliguoria1d1bb32008-11-18 20:07:32 +0000362 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +0000363
Blue Swirl72cf2d42009-09-12 07:36:22 +0000364 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +0000365 if (addr == wp->vaddr && len_mask == wp->len_mask
aliguori6e140f22008-11-18 20:37:55 +0000366 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
aliguoria1d1bb32008-11-18 20:07:32 +0000367 cpu_watchpoint_remove_by_ref(env, wp);
pbrook6658ffb2007-03-16 23:58:11 +0000368 return 0;
369 }
370 }
aliguoria1d1bb32008-11-18 20:07:32 +0000371 return -ENOENT;
pbrook6658ffb2007-03-16 23:58:11 +0000372}
373
aliguoria1d1bb32008-11-18 20:07:32 +0000374/* Remove a specific watchpoint by reference. */
Andreas Färber9349b4f2012-03-14 01:38:32 +0100375void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
aliguoria1d1bb32008-11-18 20:07:32 +0000376{
Blue Swirl72cf2d42009-09-12 07:36:22 +0000377 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
edgar_igl7d03f822008-05-17 18:58:29 +0000378
aliguoria1d1bb32008-11-18 20:07:32 +0000379 tlb_flush_page(env, watchpoint->vaddr);
380
Anthony Liguori7267c092011-08-20 22:09:37 -0500381 g_free(watchpoint);
edgar_igl7d03f822008-05-17 18:58:29 +0000382}
383
aliguoria1d1bb32008-11-18 20:07:32 +0000384/* Remove all matching watchpoints. */
Andreas Färber9349b4f2012-03-14 01:38:32 +0100385void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
aliguoria1d1bb32008-11-18 20:07:32 +0000386{
aliguoric0ce9982008-11-25 22:13:57 +0000387 CPUWatchpoint *wp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +0000388
Blue Swirl72cf2d42009-09-12 07:36:22 +0000389 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +0000390 if (wp->flags & mask)
391 cpu_watchpoint_remove_by_ref(env, wp);
aliguoric0ce9982008-11-25 22:13:57 +0000392 }
aliguoria1d1bb32008-11-18 20:07:32 +0000393}
Paul Brookc527ee82010-03-01 03:31:14 +0000394#endif
aliguoria1d1bb32008-11-18 20:07:32 +0000395
396/* Add a breakpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +0100397int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
aliguoria1d1bb32008-11-18 20:07:32 +0000398 CPUBreakpoint **breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +0000399{
bellard1fddef42005-04-17 19:16:13 +0000400#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +0000401 CPUBreakpoint *bp;
ths3b46e622007-09-17 08:09:54 +0000402
Anthony Liguori7267c092011-08-20 22:09:37 -0500403 bp = g_malloc(sizeof(*bp));
aliguoria1d1bb32008-11-18 20:07:32 +0000404
405 bp->pc = pc;
406 bp->flags = flags;
407
aliguori2dc9f412008-11-18 20:56:59 +0000408 /* keep all GDB-injected breakpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +0000409 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +0000410 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
aliguoric0ce9982008-11-25 22:13:57 +0000411 else
Blue Swirl72cf2d42009-09-12 07:36:22 +0000412 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +0000413
414 breakpoint_invalidate(env, pc);
415
416 if (breakpoint)
417 *breakpoint = bp;
418 return 0;
419#else
420 return -ENOSYS;
421#endif
422}
423
424/* Remove a specific breakpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +0100425int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
aliguoria1d1bb32008-11-18 20:07:32 +0000426{
427#if defined(TARGET_HAS_ICE)
428 CPUBreakpoint *bp;
429
Blue Swirl72cf2d42009-09-12 07:36:22 +0000430 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +0000431 if (bp->pc == pc && bp->flags == flags) {
432 cpu_breakpoint_remove_by_ref(env, bp);
bellard4c3a88a2003-07-26 12:06:08 +0000433 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +0000434 }
bellard4c3a88a2003-07-26 12:06:08 +0000435 }
aliguoria1d1bb32008-11-18 20:07:32 +0000436 return -ENOENT;
bellard4c3a88a2003-07-26 12:06:08 +0000437#else
aliguoria1d1bb32008-11-18 20:07:32 +0000438 return -ENOSYS;
bellard4c3a88a2003-07-26 12:06:08 +0000439#endif
440}
441
aliguoria1d1bb32008-11-18 20:07:32 +0000442/* Remove a specific breakpoint by reference. */
Andreas Färber9349b4f2012-03-14 01:38:32 +0100443void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +0000444{
bellard1fddef42005-04-17 19:16:13 +0000445#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +0000446 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
bellardd720b932004-04-25 17:57:43 +0000447
aliguoria1d1bb32008-11-18 20:07:32 +0000448 breakpoint_invalidate(env, breakpoint->pc);
449
Anthony Liguori7267c092011-08-20 22:09:37 -0500450 g_free(breakpoint);
aliguoria1d1bb32008-11-18 20:07:32 +0000451#endif
452}
453
454/* Remove all matching breakpoints. */
Andreas Färber9349b4f2012-03-14 01:38:32 +0100455void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
aliguoria1d1bb32008-11-18 20:07:32 +0000456{
457#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +0000458 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +0000459
Blue Swirl72cf2d42009-09-12 07:36:22 +0000460 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +0000461 if (bp->flags & mask)
462 cpu_breakpoint_remove_by_ref(env, bp);
aliguoric0ce9982008-11-25 22:13:57 +0000463 }
bellard4c3a88a2003-07-26 12:06:08 +0000464#endif
465}
466
bellardc33a3462003-07-29 20:50:33 +0000467/* enable or disable single step mode. EXCP_DEBUG is returned by the
468 CPU loop after each instruction */
Andreas Färber9349b4f2012-03-14 01:38:32 +0100469void cpu_single_step(CPUArchState *env, int enabled)
bellardc33a3462003-07-29 20:50:33 +0000470{
bellard1fddef42005-04-17 19:16:13 +0000471#if defined(TARGET_HAS_ICE)
bellardc33a3462003-07-29 20:50:33 +0000472 if (env->singlestep_enabled != enabled) {
473 env->singlestep_enabled = enabled;
aliguorie22a25c2009-03-12 20:12:48 +0000474 if (kvm_enabled())
475 kvm_update_guest_debug(env, 0);
476 else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +0100477 /* must flush all the translated code to avoid inconsistencies */
aliguorie22a25c2009-03-12 20:12:48 +0000478 /* XXX: only flush what is necessary */
479 tb_flush(env);
480 }
bellardc33a3462003-07-29 20:50:33 +0000481 }
482#endif
483}
484
Andreas Färber9349b4f2012-03-14 01:38:32 +0100485void cpu_reset_interrupt(CPUArchState *env, int mask)
bellardb54ad042004-05-20 13:42:52 +0000486{
487 env->interrupt_request &= ~mask;
488}
489
Andreas Färber9349b4f2012-03-14 01:38:32 +0100490void cpu_exit(CPUArchState *env)
aurel323098dba2009-03-07 21:28:24 +0000491{
492 env->exit_request = 1;
493 cpu_unlink_tb(env);
494}
495
Andreas Färber9349b4f2012-03-14 01:38:32 +0100496void cpu_abort(CPUArchState *env, const char *fmt, ...)
bellard75012672003-06-21 13:11:07 +0000497{
498 va_list ap;
pbrook493ae1f2007-11-23 16:53:59 +0000499 va_list ap2;
bellard75012672003-06-21 13:11:07 +0000500
501 va_start(ap, fmt);
pbrook493ae1f2007-11-23 16:53:59 +0000502 va_copy(ap2, ap);
bellard75012672003-06-21 13:11:07 +0000503 fprintf(stderr, "qemu: fatal: ");
504 vfprintf(stderr, fmt, ap);
505 fprintf(stderr, "\n");
Peter Maydell6fd2a022012-10-05 15:04:43 +0100506 cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
aliguori93fcfe32009-01-15 22:34:14 +0000507 if (qemu_log_enabled()) {
508 qemu_log("qemu: fatal: ");
509 qemu_log_vprintf(fmt, ap2);
510 qemu_log("\n");
Peter Maydell6fd2a022012-10-05 15:04:43 +0100511 log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
aliguori31b1a7b2009-01-15 22:35:09 +0000512 qemu_log_flush();
aliguori93fcfe32009-01-15 22:34:14 +0000513 qemu_log_close();
balrog924edca2007-06-10 14:07:13 +0000514 }
pbrook493ae1f2007-11-23 16:53:59 +0000515 va_end(ap2);
j_mayerf9373292007-09-29 12:18:20 +0000516 va_end(ap);
Riku Voipiofd052bf2010-01-25 14:30:49 +0200517#if defined(CONFIG_USER_ONLY)
518 {
519 struct sigaction act;
520 sigfillset(&act.sa_mask);
521 act.sa_handler = SIG_DFL;
522 sigaction(SIGABRT, &act, NULL);
523 }
524#endif
bellard75012672003-06-21 13:11:07 +0000525 abort();
526}
527
Andreas Färber9349b4f2012-03-14 01:38:32 +0100528CPUArchState *cpu_copy(CPUArchState *env)
thsc5be9f02007-02-28 20:20:53 +0000529{
Andreas Färber9349b4f2012-03-14 01:38:32 +0100530 CPUArchState *new_env = cpu_init(env->cpu_model_str);
531 CPUArchState *next_cpu = new_env->next_cpu;
thsc5be9f02007-02-28 20:20:53 +0000532 int cpu_index = new_env->cpu_index;
aliguori5a38f082009-01-15 20:16:51 +0000533#if defined(TARGET_HAS_ICE)
534 CPUBreakpoint *bp;
535 CPUWatchpoint *wp;
536#endif
537
Andreas Färber9349b4f2012-03-14 01:38:32 +0100538 memcpy(new_env, env, sizeof(CPUArchState));
aliguori5a38f082009-01-15 20:16:51 +0000539
540 /* Preserve chaining and index. */
thsc5be9f02007-02-28 20:20:53 +0000541 new_env->next_cpu = next_cpu;
542 new_env->cpu_index = cpu_index;
aliguori5a38f082009-01-15 20:16:51 +0000543
544 /* Clone all break/watchpoints.
545 Note: Once we support ptrace with hw-debug register access, make sure
546 BP_CPU break/watchpoints are handled correctly on clone. */
Blue Swirl72cf2d42009-09-12 07:36:22 +0000547 QTAILQ_INIT(&env->breakpoints);
548 QTAILQ_INIT(&env->watchpoints);
aliguori5a38f082009-01-15 20:16:51 +0000549#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +0000550 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +0000551 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
552 }
Blue Swirl72cf2d42009-09-12 07:36:22 +0000553 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +0000554 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
555 wp->flags, NULL);
556 }
557#endif
558
thsc5be9f02007-02-28 20:20:53 +0000559 return new_env;
560}
561
bellard01243112004-01-04 15:48:17 +0000562#if !defined(CONFIG_USER_ONLY)
Juan Quintelad24981d2012-05-22 00:42:40 +0200563static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
564 uintptr_t length)
bellard1ccde1c2004-02-06 19:46:14 +0000565{
Juan Quintelad24981d2012-05-22 00:42:40 +0200566 uintptr_t start1;
bellardf23db162005-08-21 19:12:28 +0000567
bellard1ccde1c2004-02-06 19:46:14 +0000568 /* we modify the TLB cache so that the dirty bit will be set again
569 when accessing the range */
Stefan Weil8efe0ca2012-04-12 15:42:19 +0200570 start1 = (uintptr_t)qemu_safe_ram_ptr(start);
Stefan Weila57d23e2011-04-30 22:49:26 +0200571 /* Check that we don't span multiple blocks - this breaks the
pbrook5579c7f2009-04-11 14:47:08 +0000572 address comparisons below. */
Stefan Weil8efe0ca2012-04-12 15:42:19 +0200573 if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
pbrook5579c7f2009-04-11 14:47:08 +0000574 != (end - 1) - start) {
575 abort();
576 }
Blue Swirle5548612012-04-21 13:08:33 +0000577 cpu_tlb_reset_dirty_all(start1, length);
Juan Quintelad24981d2012-05-22 00:42:40 +0200578
579}
580
581/* Note: start and end must be within the same ram block. */
582void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
583 int dirty_flags)
584{
585 uintptr_t length;
586
587 start &= TARGET_PAGE_MASK;
588 end = TARGET_PAGE_ALIGN(end);
589
590 length = end - start;
591 if (length == 0)
592 return;
593 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
594
595 if (tcg_enabled()) {
596 tlb_reset_dirty_range_all(start, end, length);
597 }
bellard1ccde1c2004-02-06 19:46:14 +0000598}
599
Blue Swirl8b9c99d2012-10-28 11:04:51 +0000600static int cpu_physical_memory_set_dirty_tracking(int enable)
aliguori74576192008-10-06 14:02:03 +0000601{
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +0200602 int ret = 0;
aliguori74576192008-10-06 14:02:03 +0000603 in_migration = enable;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +0200604 return ret;
aliguori74576192008-10-06 14:02:03 +0000605}
606
/* Compute the iotlb value for a TLB entry covering guest page 'vaddr'
 * that maps physical address 'paddr' inside 'section'.
 *
 * For RAM the iotlb carries the ram_addr of the page, OR-ed with the
 * notdirty (writable RAM needs dirty tracking) or ROM section index in
 * the low bits.  For MMIO it carries the section's index into the
 * global phys_sections table plus the offset within the region.
 * If a watchpoint covers the page, the iotlb is redirected to the
 * watch section and *address is flagged TLB_MMIO so every access traps.
 */
hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, paddr);
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = section - phys_sections;
        iotlb += memory_region_section_addr(section, paddr);
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
bellard9fa3e852004-01-04 18:06:42 +0000652#endif /* defined(CONFIG_USER_ONLY) */
653
pbrooke2eef172008-06-08 01:09:01 +0000654#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +0000655
/* Byte offset of 'addr' within its target page. */
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
/* A subpage subdivides a single target page that is shared by several
 * memory regions: each byte offset maps to a phys_sections index. */
typedef struct subpage_t {
    MemoryRegion iomem;          /* forwarding MMIO region for the page */
    hwaddr base;                 /* guest-physical base of the page */
    uint16_t sub_section[TARGET_PAGE_SIZE]; /* per-byte section index */
} subpage_t;
662
Anthony Liguoric227f092009-10-01 16:12:16 -0500663static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +0200664 uint16_t section);
Avi Kivitya8170e52012-10-23 12:30:10 +0200665static subpage_t *subpage_init(hwaddr base);
Avi Kivity5312bd82012-02-12 18:32:55 +0200666static void destroy_page_desc(uint16_t section_index)
Avi Kivity54688b12012-02-09 17:34:32 +0200667{
Avi Kivity5312bd82012-02-12 18:32:55 +0200668 MemoryRegionSection *section = &phys_sections[section_index];
669 MemoryRegion *mr = section->mr;
Avi Kivity54688b12012-02-09 17:34:32 +0200670
671 if (mr->subpage) {
672 subpage_t *subpage = container_of(mr, subpage_t, iomem);
673 memory_region_destroy(&subpage->iomem);
674 g_free(subpage);
675 }
676}
677
/* Recursively tear down one node of the physical-page radix tree.
 * Interior entries recurse one level down; leaf entries release their
 * page descriptor.  On return the entry is reset to an empty non-leaf
 * so the node can be reused after phys_map_nodes_reset(). */
static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        /* Empty subtree: nothing to free. */
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}
698
/* Tear down the whole physical-page radix tree of an address space and
 * recycle all phys_map nodes for reuse. */
static void destroy_all_mappings(AddressSpaceDispatch *d)
{
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}
704
Avi Kivity5312bd82012-02-12 18:32:55 +0200705static uint16_t phys_section_add(MemoryRegionSection *section)
706{
707 if (phys_sections_nb == phys_sections_nb_alloc) {
708 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
709 phys_sections = g_renew(MemoryRegionSection, phys_sections,
710 phys_sections_nb_alloc);
711 }
712 phys_sections[phys_sections_nb] = *section;
713 return phys_sections_nb++;
714}
715
/* Forget all registered sections; the array storage itself is kept and
 * reused by subsequent phys_section_add() calls. */
static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}
720
/* Register a sub-page-sized (or unaligned) section.  The containing
 * target page gets a subpage_t container: created on first use, reused
 * if the page is already a subpage.  The section's byte range is then
 * recorded inside the container. */
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
    /* Page-sized wrapper section for a newly created subpage container. */
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    hwaddr start, end;

    /* The page must be either unmapped or already a subpage; a full
     * multipage mapping here would indicate overlapping registration. */
    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    /* Byte range within the page; 'end' is inclusive. */
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}
747
748
Avi Kivityac1970f2012-10-03 16:22:53 +0200749static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
bellard33417e72003-08-10 21:47:01 +0000750{
Avi Kivitya8170e52012-10-23 12:30:10 +0200751 hwaddr start_addr = section->offset_within_address_space;
Avi Kivitydd811242012-01-02 12:17:03 +0200752 ram_addr_t size = section->size;
Avi Kivitya8170e52012-10-23 12:30:10 +0200753 hwaddr addr;
Avi Kivity5312bd82012-02-12 18:32:55 +0200754 uint16_t section_index = phys_section_add(section);
Avi Kivitydd811242012-01-02 12:17:03 +0200755
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +0200756 assert(size);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +0200757
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +0200758 addr = start_addr;
Avi Kivityac1970f2012-10-03 16:22:53 +0200759 phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
Avi Kivity29990972012-02-13 20:21:20 +0200760 section_index);
bellard33417e72003-08-10 21:47:01 +0000761}
762
/* MemoryListener 'region_add' callback: install 'section' into the
 * dispatch tree.  The section is split into up to three parts:
 * an unaligned head (subpage), a run of whole pages (multipage or
 * per-page subpages when the region offset is unaligned), and an
 * unaligned tail (subpage). */
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
    /* 'now' is the piece being registered; 'remain' is what is left. */
    MemoryRegionSection now = *section, remain = *section;

    /* Unaligned or short head: register just up to the next page
     * boundary as a subpage. */
    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(d, &now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    /* Whole pages in the middle. */
    while (remain.size >= TARGET_PAGE_SIZE) {
        now = remain;
        if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            /* Region-internal offset unaligned: fall back to one
             * subpage per page. */
            now.size = TARGET_PAGE_SIZE;
            register_subpage(d, &now);
        } else {
            now.size &= TARGET_PAGE_MASK;
            register_multipage(d, &now);
        }
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    /* Sub-page tail, if any. */
    now = remain;
    if (now.size) {
        register_subpage(d, &now);
    }
}
796
/* Drain any MMIO writes batched in the in-kernel coalescing buffer so
 * device emulation observes them.  No-op without KVM. */
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}
802
Marcelo Tosattic9027602010-03-01 20:25:08 -0300803#if defined(__linux__) && !defined(TARGET_S390X)
804
805#include <sys/vfs.h>
806
807#define HUGETLBFS_MAGIC 0x958458f6
808
809static long gethugepagesize(const char *path)
810{
811 struct statfs fs;
812 int ret;
813
814 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +0900815 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -0300816 } while (ret != 0 && errno == EINTR);
817
818 if (ret != 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +0900819 perror(path);
820 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -0300821 }
822
823 if (fs.f_type != HUGETLBFS_MAGIC)
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +0900824 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
Marcelo Tosattic9027602010-03-01 20:25:08 -0300825
826 return fs.f_bsize;
827}
828
/* Allocate 'memory' bytes of guest RAM backed by a hugetlbfs file in
 * directory 'path'.  Creates an anonymous (immediately unlinked)
 * temporary file, sizes it, and mmaps it.  On success stores the file
 * descriptor in block->fd and returns the mapping; returns NULL on any
 * failure so the caller can fall back to normal allocation. */
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    /* Requests smaller than one huge page cannot be satisfied here. */
    if (memory < hpagesize) {
        return NULL;
    }

    /* Without MMU notifiers KVM cannot track a file-backed mapping. */
    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    /* Unlink immediately: the fd keeps the file alive, and it vanishes
     * automatically when QEMU exits. */
    unlink(filename);
    free(filename);

    /* Round the size up to a whole number of huge pages. */
    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    block->fd = fd;
    return area;
}
897#endif
898
/* Find a free gap of at least 'size' bytes in the ram_addr_t space.
 * Scans all existing RAMBlocks and, for each block end, computes the
 * distance to the nearest following block; the smallest gap that still
 * fits is chosen (best-fit) to limit fragmentation.  Aborts if no gap
 * is large enough. */
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QLIST_EMPTY(&ram_list.blocks))
        return 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        /* Candidate gap starts where this block ends... */
        end = block->offset + block->length;

        /* ...and runs to the closest block that starts at or after it. */
        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
931
Juan Quintela652d7ec2012-07-20 10:37:54 +0200932ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -0600933{
Alex Williamsond17b5282010-06-25 11:08:38 -0600934 RAMBlock *block;
935 ram_addr_t last = 0;
936
937 QLIST_FOREACH(block, &ram_list.blocks, next)
938 last = MAX(last, block->offset + block->length);
939
940 return last;
941}
942
Jason Baronddb97f12012-08-02 15:44:16 -0400943static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
944{
945 int ret;
946 QemuOpts *machine_opts;
947
948 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
949 machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
950 if (machine_opts &&
951 !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
952 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
953 if (ret) {
954 perror("qemu_madvise");
955 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
956 "but dump_guest_core=off specified\n");
957 }
958 }
959}
960
/* Assign the migration identifier of the RAMBlock at 'addr': an
 * optional qdev path prefix ("path/") followed by 'name'.  The block
 * must exist, must not already have an idstr, and the resulting name
 * must be unique among all blocks — violations abort. */
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    /* Locate the block that starts exactly at 'addr'. */
    new_block = NULL;
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* Reject duplicate identifiers: migration keys RAM by idstr. */
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
}
992
Luiz Capitulino8490fc72012-09-05 16:50:16 -0300993static int memory_try_enable_merging(void *addr, size_t len)
994{
995 QemuOpts *opts;
996
997 opts = qemu_opts_find(qemu_find_opts("machine"), 0);
998 if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
999 /* disabled by the user */
1000 return 0;
1001 }
1002
1003 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1004}
1005
/* Register 'size' bytes of guest RAM for MemoryRegion 'mr' and return
 * its ram_addr_t offset.  When 'host' is non-NULL the caller supplies
 * the backing memory (RAM_PREALLOC_MASK is set and we never free it);
 * otherwise backing is chosen in order: hugetlbfs file (-mem-path,
 * Linux only, with fallback to qemu_vmalloc), Xen grant allocation,
 * KVM-constrained allocation, or plain qemu_vmalloc.  Also grows the
 * dirty bitmap and marks the new range fully dirty. */
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        /* Caller-provided backing: record it and never free it. */
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                /* hugetlbfs failed: fall back to anonymous memory. */
                new_block->host = qemu_vmalloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                /* Xen owns the memory; host pointer filled lazily. */
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_vmalloc(size);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    /* Grow the global dirty bitmap and mark the new range fully dirty
     * so the first migration pass transfers it. */
    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
1061
/* Allocate 'size' bytes of guest RAM for 'mr' with freshly allocated
 * host backing (no preexisting host pointer). */
ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}
bellarde9a1ab12007-02-08 23:08:38 +00001066
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001067void qemu_ram_free_from_ptr(ram_addr_t addr)
1068{
1069 RAMBlock *block;
1070
1071 QLIST_FOREACH(block, &ram_list.blocks, next) {
1072 if (addr == block->offset) {
1073 QLIST_REMOVE(block, next);
Anthony Liguori7267c092011-08-20 22:09:37 -05001074 g_free(block);
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001075 return;
1076 }
1077 }
1078}
1079
/* Unregister the RAMBlock starting at 'addr' and release its host
 * backing via whichever mechanism allocated it: nothing for
 * caller-preallocated memory, munmap/close for -mem-path file backing,
 * Xen map-cache invalidation, or qemu_vfree.  No-op when no block
 * starts at that offset. */
void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                /* Caller owns the memory: nothing to release. */
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    /* hugetlbfs file backing. */
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    /* -mem-path fallback allocation. */
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            return;
        }
    }

}
1117
Huang Yingcd19cfa2011-03-02 08:56:19 +01001118#ifndef _WIN32
/* Throw away and re-create the mapping of 'length' bytes of guest RAM
 * at ram_addr 'addr' (used e.g. to recover from hardware memory
 * errors: the poisoned pages are replaced by fresh ones at the same
 * host virtual address via MAP_FIXED).  Preallocated blocks are left
 * untouched.  Exits on remap failure since the guest's memory layout
 * cannot be preserved otherwise. */
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                /* Caller-owned memory: we must not remap it. */
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
                        /* Re-map the same file range, mirroring the
                         * flags used by file_ram_alloc(). */
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    /* MAP_FIXED did not land where expected. */
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                /* Re-apply madvise-based policies lost by the remap. */
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
1178#endif /* !_WIN32 */
1179
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list (MRU heuristic:
             * repeated lookups tend to hit the same block). */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            }
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
1220
/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks (safe to
 * call while another thread may be iterating the block list).
 */
static void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
1251
/* Return a host pointer to guest's ram.  Similar to qemu_get_ram_ptr
 * but takes a size argument: on return *size is clamped to the number
 * of contiguous bytes available from 'addr' within its RAMBlock.
 * Returns NULL when *size is 0; aborts on an unknown ram offset. */
static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QLIST_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                /* Clamp to what remains of this block. */
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}
1276
/* Release a pointer obtained from qemu_get_ram_ptr()/qemu_safe_ram_ptr().
 * Currently only emits a trace event; no unmapping is performed here. */
void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}
1281
/* Translate a host pointer back into a guest ram_addr_t.  On success
 * stores the offset in *ram_addr and returns 0; returns -1 when the
 * pointer does not fall inside any mapped RAMBlock.  Under Xen the
 * translation is delegated to the map cache. */
int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* Skip blocks that have no host mapping. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}
Alex Williamsonf471a172010-06-11 11:11:42 -06001305
Marcelo Tosattie8902612010-10-11 15:31:19 -03001306/* Some of the softmmu routines need to translate from a host pointer
1307 (typically a TLB entry) back to a ram offset. */
1308ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
1309{
1310 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06001311
Marcelo Tosattie8902612010-10-11 15:31:19 -03001312 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
1313 fprintf(stderr, "Bad ram pointer %p\n", ptr);
1314 abort();
1315 }
1316 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00001317}
1318
/* Read handler for addresses with no memory region.  Reads as zero;
 * on targets that model unassigned-access faults it raises the
 * CPU-specific exception first. */
static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
#endif
    return 0;
}
1330
/* Write handler for addresses with no memory region.  The value is
 * discarded; fault-modelling targets raise their unassigned-access
 * exception first. */
static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
#endif
}
1341
/* Region ops for unassigned address space: reads return 0, writes are
 * ignored (modulo target-specific fault injection above). */
static const MemoryRegionOps unassigned_mem_ops = {
    .read = unassigned_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
1347
/* Placeholder read handler for ops that must never be read
 * (e.g. write-only dirty-tracking regions); reaching it is a bug. */
static uint64_t error_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    abort();
}
1353
/* Placeholder write handler for ops that must never be written;
 * reaching it is a bug. */
static void error_mem_write(void *opaque, hwaddr addr,
                            uint64_t value, unsigned size)
{
    abort();
}
1359
/* Region ops that abort on any access — used where dispatch should
 * never reach the handlers. */
static const MemoryRegionOps error_mem_ops = {
    .read = error_mem_read,
    .write = error_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
1365
/* Region ops for ROM: reads go straight to RAM (never through here,
 * hence error_mem_read), writes are silently discarded. */
static const MemoryRegionOps rom_mem_ops = {
    .read = error_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
1371
/* Write handler for RAM pages tracked as "not dirty": performs the
 * store, invalidates any translated code on the page, and updates the
 * dirty flags.  Once the page is fully dirty (0xff) the slow path is
 * no longer needed and the TLB entry is switched back to plain RAM. */
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        /* Page may hold translated code: invalidate it, then re-read
         * the flags which the invalidation may have changed. */
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    /* Perform the actual store into host RAM at the right width. */
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
1403
/* Ops for dirty-tracked RAM pages: reads bypass the I/O path entirely
   (hence error_mem_read), writes are intercepted to track dirtying.  */
static const MemoryRegionOps notdirty_mem_ops = {
    .read = error_mem_read,
    .write = notdirty_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
1409
/* Generate a debug exception if a watchpoint has been hit.
   @offset: offset of the access within the current page
   @len_mask: mask derived from the access size (~(size - 1))
   @flags: BP_MEM_READ and/or BP_MEM_WRITE, matched against wp->flags.
   May not return: exits via cpu_loop_exit/cpu_resume_from_signal.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = cpu_single_env;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that is will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    /* Reconstruct the guest virtual address of the faulting access.  */
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        /* Two-sided overlap test between the access range and the
           watchpoint range, using the respective length masks.  */
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb_check_watchpoint(env);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    /* Stop before the access: raise EXCP_DEBUG now.  */
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    /* Stop after the access: regenerate a single-insn TB
                       and restart so the insn completes first.  */
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
1448
pbrook6658ffb2007-03-16 23:58:11 +00001449/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1450 so these check for a hit then pass through to the normal out-of-line
1451 phys routines. */
Avi Kivitya8170e52012-10-23 12:30:10 +02001452static uint64_t watch_mem_read(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001453 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001454{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001455 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1456 switch (size) {
1457 case 1: return ldub_phys(addr);
1458 case 2: return lduw_phys(addr);
1459 case 4: return ldl_phys(addr);
1460 default: abort();
1461 }
pbrook6658ffb2007-03-16 23:58:11 +00001462}
1463
Avi Kivitya8170e52012-10-23 12:30:10 +02001464static void watch_mem_write(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001465 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001466{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001467 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1468 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04001469 case 1:
1470 stb_phys(addr, val);
1471 break;
1472 case 2:
1473 stw_phys(addr, val);
1474 break;
1475 case 4:
1476 stl_phys(addr, val);
1477 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02001478 default: abort();
1479 }
pbrook6658ffb2007-03-16 23:58:11 +00001480}
1481
/* Ops for the watchpoint region installed over watched pages.  */
static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
pbrook6658ffb2007-03-16 23:58:11 +00001487
Avi Kivitya8170e52012-10-23 12:30:10 +02001488static uint64_t subpage_read(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001489 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001490{
Avi Kivity70c68e42012-01-02 12:32:48 +02001491 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07001492 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02001493 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00001494#if defined(DEBUG_SUBPAGE)
1495 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
1496 mmio, len, addr, idx);
1497#endif
blueswir1db7b5422007-05-26 17:36:03 +00001498
Avi Kivity5312bd82012-02-12 18:32:55 +02001499 section = &phys_sections[mmio->sub_section[idx]];
1500 addr += mmio->base;
1501 addr -= section->offset_within_address_space;
1502 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02001503 return io_mem_read(section->mr, addr, len);
blueswir1db7b5422007-05-26 17:36:03 +00001504}
1505
Avi Kivitya8170e52012-10-23 12:30:10 +02001506static void subpage_write(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001507 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001508{
Avi Kivity70c68e42012-01-02 12:32:48 +02001509 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07001510 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02001511 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00001512#if defined(DEBUG_SUBPAGE)
Avi Kivity70c68e42012-01-02 12:32:48 +02001513 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
1514 " idx %d value %"PRIx64"\n",
Richard Hendersonf6405242010-04-22 16:47:31 -07001515 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00001516#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07001517
Avi Kivity5312bd82012-02-12 18:32:55 +02001518 section = &phys_sections[mmio->sub_section[idx]];
1519 addr += mmio->base;
1520 addr -= section->offset_within_address_space;
1521 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02001522 io_mem_write(section->mr, addr, value, len);
blueswir1db7b5422007-05-26 17:36:03 +00001523}
1524
/* Ops installed on pages carved into multiple MemoryRegionSections.  */
static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
1530
Avi Kivitya8170e52012-10-23 12:30:10 +02001531static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
Avi Kivityde712f92012-01-02 12:41:07 +02001532 unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01001533{
1534 ram_addr_t raddr = addr;
1535 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02001536 switch (size) {
1537 case 1: return ldub_p(ptr);
1538 case 2: return lduw_p(ptr);
1539 case 4: return ldl_p(ptr);
1540 default: abort();
1541 }
Andreas Färber56384e82011-11-30 16:26:21 +01001542}
1543
Avi Kivitya8170e52012-10-23 12:30:10 +02001544static void subpage_ram_write(void *opaque, hwaddr addr,
Avi Kivityde712f92012-01-02 12:41:07 +02001545 uint64_t value, unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01001546{
1547 ram_addr_t raddr = addr;
1548 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02001549 switch (size) {
1550 case 1: return stb_p(ptr, value);
1551 case 2: return stw_p(ptr, value);
1552 case 4: return stl_p(ptr, value);
1553 default: abort();
1554 }
Andreas Färber56384e82011-11-30 16:26:21 +01001555}
1556
/* Ops for RAM sections reached through the subpage indirection.  */
static const MemoryRegionOps subpage_ram_ops = {
    .read = subpage_ram_read,
    .write = subpage_ram_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
1562
Anthony Liguoric227f092009-10-01 16:12:16 -05001563static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02001564 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00001565{
1566 int idx, eidx;
1567
1568 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1569 return -1;
1570 idx = SUBPAGE_IDX(start);
1571 eidx = SUBPAGE_IDX(end);
1572#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00001573 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
blueswir1db7b5422007-05-26 17:36:03 +00001574 mmio, start, end, idx, eidx, memory);
1575#endif
Avi Kivity5312bd82012-02-12 18:32:55 +02001576 if (memory_region_is_ram(phys_sections[section].mr)) {
1577 MemoryRegionSection new_section = phys_sections[section];
1578 new_section.mr = &io_mem_subpage_ram;
1579 section = phys_section_add(&new_section);
Andreas Färber56384e82011-11-30 16:26:21 +01001580 }
blueswir1db7b5422007-05-26 17:36:03 +00001581 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02001582 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00001583 }
1584
1585 return 0;
1586}
1587
Avi Kivitya8170e52012-10-23 12:30:10 +02001588static subpage_t *subpage_init(hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00001589{
Anthony Liguoric227f092009-10-01 16:12:16 -05001590 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00001591
Anthony Liguori7267c092011-08-20 22:09:37 -05001592 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00001593
1594 mmio->base = base;
Avi Kivity70c68e42012-01-02 12:32:48 +02001595 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
1596 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02001597 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00001598#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00001599 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
1600 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
blueswir1db7b5422007-05-26 17:36:03 +00001601#endif
Avi Kivity0f0cb162012-02-13 17:14:32 +02001602 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
blueswir1db7b5422007-05-26 17:36:03 +00001603
1604 return mmio;
1605}
1606
/* Register a whole-address-space section backed by @mr and return its
   index in phys_sections; used for the fixed ram/rom/notdirty/watch
   entries whose indices other code relies on.  */
static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = UINT64_MAX,
    };

    return phys_section_add(&section);
}
1618
/* Map an IOTLB entry back to its MemoryRegion; the low bits of @index
   carry in-page flags, so mask them off before indexing phys_sections.  */
MemoryRegion *iotlb_to_region(hwaddr index)
{
    return phys_sections[index & ~TARGET_PAGE_MASK].mr;
}
1623
/* One-time setup of the built-in I/O regions used by the dispatch code:
   ram/rom (direct access, I/O path must not fire), unassigned, dirty
   tracking, subpage RAM and watchpoints.  */
static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
    memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
                          "subpage-ram", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}
1637
/* MemoryListener begin hook for an address space: drop all existing
   mappings so the upcoming region_add callbacks rebuild the phys map
   from scratch.  */
static void mem_begin(MemoryListener *listener)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);

    destroy_all_mappings(d);
    d->phys_map.ptr = PHYS_MAP_NODE_NIL;
}
1645
/* Core listener begin hook: reset the global section table and
   re-register the four fixed sections whose indices
   (phys_section_unassigned, etc.) the dispatch code depends on.  */
static void core_begin(MemoryListener *listener)
{
    phys_sections_clear();
    phys_section_unassigned = dummy_section(&io_mem_unassigned);
    phys_section_notdirty = dummy_section(&io_mem_notdirty);
    phys_section_rom = dummy_section(&io_mem_rom);
    phys_section_watch = dummy_section(&io_mem_watch);
}
1654
/* TCG listener commit hook: after the memory topology changed, flush
   every CPU's TLB so stale ram_addr translations are discarded.  */
static void tcg_commit(MemoryListener *listener)
{
    CPUArchState *env;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
1666
/* Enable global dirty-memory tracking (e.g. for live migration).  */
static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}
1671
/* Disable global dirty-memory tracking.  */
static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}
1676
/* I/O-space listener: expose a newly added MemoryRegionSection through
   the legacy ioport layer.  The MemoryRegionIORange is owned by the
   ioport layer once registered.  */
static void io_region_add(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);

    mrio->mr = section->mr;
    mrio->offset = section->offset_within_region;
    iorange_init(&mrio->iorange, &memory_region_iorange_ops,
                 section->offset_within_address_space, section->size);
    ioport_register(&mrio->iorange);
}
1688
/* I/O-space listener: tear down the ioport registration for a removed
   section.  */
static void io_region_del(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    isa_unassign_ioport(section->offset_within_address_space, section->size);
}
1694
/* Core listener on the system memory address space; priority 1 so it
   runs after the per-address-space dispatch listener (priority 0).  */
static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .priority = 1,
};
1701
/* Listener bridging the I/O address space to the legacy ioport API.  */
static MemoryListener io_memory_listener = {
    .region_add = io_region_add,
    .region_del = io_region_del,
    .priority = 0,
};
1707
/* Listener that flushes CPU TLBs when the memory map commits.  */
static MemoryListener tcg_memory_listener = {
    .commit = tcg_commit,
};
1711
/* Attach a dispatch structure (phys page map + listener) to @as and
   register its listener so the map tracks the address space topology.
   region_nop is wired to mem_add too so unchanged regions are re-added
   after mem_begin wiped the map.  */
void address_space_init_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
    d->listener = (MemoryListener) {
        .begin = mem_begin,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    as->dispatch = d;
    memory_listener_register(&d->listener, as);
}
1726
/* Inverse of address_space_init_dispatch: unregister the listener, free
   the phys page map levels, and release the dispatch structure.  */
void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    memory_listener_unregister(&d->listener);
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    g_free(d);
    as->dispatch = NULL;
}
1736
/* Create the two root address spaces ("memory" and "I/O"), hook up the
   core/io/tcg listeners, and initialize the default DMA context.  */
static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    address_space_init(&address_space_memory, system_memory);
    address_space_memory.name = "memory";

    /* Legacy x86-style I/O port space: 64K ports.  */
    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    address_space_init(&address_space_io, system_io);
    address_space_io.name = "I/O";

    memory_listener_register(&core_memory_listener, &address_space_memory);
    memory_listener_register(&io_memory_listener, &address_space_io);
    memory_listener_register(&tcg_memory_listener, &address_space_memory);

    dma_context_init(&dma_context_memory, &address_space_memory,
                     NULL, NULL, NULL);
}
1756
/* Accessor for the root of the system memory region tree.  */
MemoryRegion *get_system_memory(void)
{
    return system_memory;
}
1761
/* Accessor for the root of the I/O port region tree.  */
MemoryRegion *get_system_io(void)
{
    return system_io;
}
1766
pbrooke2eef172008-06-08 01:09:01 +00001767#endif /* !defined(CONFIG_USER_ONLY) */
1768
bellard13eb76e2004-01-24 15:23:36 +00001769/* physical memory access (slow version, mainly for debug) */
1770#if defined(CONFIG_USER_ONLY)
/* User-mode debug access to guest memory: walk the request page by page,
   validate page flags, and copy through lock_user/unlock_user so host
   page protections set for the guest are honoured.
   Returns 0 on success, -1 on any invalid or inaccessible page.  */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* Clamp this chunk to the end of the current guest page.  */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            /* len=0: nothing was written back to guest memory.  */
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
bellard8df1cd02005-01-28 22:37:22 +00001809
bellard13eb76e2004-01-24 15:23:36 +00001810#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001811
/* After a direct write into RAM at [addr, addr+length): throw away any
   translated code covering the range, mark it dirty, and notify Xen of
   the modification.  */
static void invalidate_and_set_dirty(hwaddr addr,
                                     hwaddr length)
{
    if (!cpu_physical_memory_is_dirty(addr)) {
        /* invalidate code */
        tb_invalidate_phys_page_range(addr, addr + length, 0);
        /* set dirty bit */
        cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
    }
    xen_modified_memory(addr, length);
}
1823
/* Core bounce-free read/write path for an address space.  Splits the
   request at page boundaries; RAM sections are accessed via memcpy on
   the host pointer, everything else goes through io_mem_read/write in
   the widest naturally-aligned unit (4/2/1 bytes) that fits.
   Writes to RAM invalidate translated code and set dirty bits; writes
   to readonly RAM sections are silently dropped.  */
void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
                      int len, bool is_write)
{
    AddressSpaceDispatch *d = as->dispatch;
    int l;
    uint8_t *ptr;
    uint32_t val;
    hwaddr page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* Clamp this chunk to the end of the current page.  */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(d, page >> TARGET_PAGE_BITS);

        if (is_write) {
            if (!memory_region_is_ram(section->mr)) {
                hwaddr addr1;
                addr1 = memory_region_section_addr(section, addr);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write(section->mr, addr1, val, 4);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write(section->mr, addr1, val, 2);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write(section->mr, addr1, val, 1);
                    l = 1;
                }
            } else if (!section->readonly) {
                ram_addr_t addr1;
                addr1 = memory_region_get_ram_addr(section->mr)
                    + memory_region_section_addr(section, addr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if (!(memory_region_is_ram(section->mr) ||
                  memory_region_is_romd(section->mr))) {
                hwaddr addr1;
                /* I/O case */
                addr1 = memory_region_section_addr(section, addr);
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read(section->mr, addr1, 4);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read(section->mr, addr1, 2);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read(section->mr, addr1, 1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(section->mr->ram_addr
                                       + memory_region_section_addr(section,
                                                                    addr));
                memcpy(buf, ptr, l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
bellard8df1cd02005-01-28 22:37:22 +00001909
/* Write @len bytes from @buf into @as at @addr.  The cast is safe:
   address_space_rw never modifies the buffer when is_write is true.  */
void address_space_write(AddressSpace *as, hwaddr addr,
                         const uint8_t *buf, int len)
{
    address_space_rw(as, addr, (uint8_t *)buf, len, true);
}
1915
/**
 * address_space_read: read from an address space.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @buf: buffer with the data transferred
 * @len: number of bytes to read
 */
void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
{
    address_space_rw(as, addr, buf, len, false);
}
1927
1928
Avi Kivitya8170e52012-10-23 12:30:10 +02001929void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02001930 int len, int is_write)
1931{
1932 return address_space_rw(&address_space_memory, addr, buf, len, is_write);
1933}
1934
/* used for ROM loading : can write in RAM and ROM */
/* Like a write via address_space_rw, but ignores the readonly attribute
   so ROM images can be loaded; non-RAM/ROM destinations are skipped.  */
void cpu_physical_memory_write_rom(hwaddr addr,
                                   const uint8_t *buf, int len)
{
    AddressSpaceDispatch *d = address_space_memory.dispatch;
    int l;
    uint8_t *ptr;
    hwaddr page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* Clamp this chunk to the end of the current page.  */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(d, page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) ||
              memory_region_is_romd(section->mr))) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = memory_region_get_ram_addr(section->mr)
                + memory_region_section_addr(section, addr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(addr1, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
1970
/* Fallback buffer used by address_space_map when the target is not
   directly-mappable RAM; only one bounce mapping may be live at a time
   (bounce.buffer != NULL marks it in use).  */
typedef struct {
    void *buffer;      /* bounce storage, NULL when free */
    hwaddr addr;       /* guest address being shadowed */
    hwaddr len;        /* length of the shadowed range */
} BounceBuffer;

static BounceBuffer bounce;
1978
/* Callback registration for callers waiting for the bounce buffer to
   become available again (see cpu_register_map_client).  */
typedef struct MapClient {
    void *opaque;                     /* passed back to the callback */
    void (*callback)(void *opaque);   /* invoked when mapping may succeed */
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00001987
/* Register @callback to be invoked when a failed address_space_map may
   be retried.  Returns an opaque handle; the registration is consumed
   (freed) when the callback fires via cpu_notify_map_clients.  */
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}
1997
Blue Swirl8b9c99d2012-10-28 11:04:51 +00001998static void cpu_unregister_map_client(void *_client)
aliguoriba223c22009-01-22 16:59:16 +00001999{
2000 MapClient *client = (MapClient *)_client;
2001
Blue Swirl72cf2d42009-09-12 07:36:22 +00002002 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002003 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002004}
2005
/* Fire every pending map-client callback (the bounce buffer was
   released) and drop each registration as it is notified.  */
static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
2016
aliguori6d16c2f2009-01-22 16:59:11 +00002017/* Map a physical memory region into a host virtual address.
2018 * May map a subset of the requested range, given by and returned in *plen.
2019 * May return NULL if resources needed to perform the mapping are exhausted.
2020 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002021 * Use cpu_register_map_client() to know when retrying the map operation is
2022 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002023 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002024void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002025 hwaddr addr,
2026 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002027 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002028{
Avi Kivityac1970f2012-10-03 16:22:53 +02002029 AddressSpaceDispatch *d = as->dispatch;
Avi Kivitya8170e52012-10-23 12:30:10 +02002030 hwaddr len = *plen;
2031 hwaddr todo = 0;
aliguori6d16c2f2009-01-22 16:59:11 +00002032 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02002033 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02002034 MemoryRegionSection *section;
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002035 ram_addr_t raddr = RAM_ADDR_MAX;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002036 ram_addr_t rlen;
2037 void *ret;
aliguori6d16c2f2009-01-22 16:59:11 +00002038
2039 while (len > 0) {
2040 page = addr & TARGET_PAGE_MASK;
2041 l = (page + TARGET_PAGE_SIZE) - addr;
2042 if (l > len)
2043 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02002044 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
aliguori6d16c2f2009-01-22 16:59:11 +00002045
Avi Kivityf3705d52012-03-08 16:16:34 +02002046 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002047 if (todo || bounce.buffer) {
aliguori6d16c2f2009-01-22 16:59:11 +00002048 break;
2049 }
2050 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2051 bounce.addr = addr;
2052 bounce.len = l;
2053 if (!is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002054 address_space_read(as, addr, bounce.buffer, l);
aliguori6d16c2f2009-01-22 16:59:11 +00002055 }
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002056
2057 *plen = l;
2058 return bounce.buffer;
aliguori6d16c2f2009-01-22 16:59:11 +00002059 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002060 if (!todo) {
Avi Kivityf3705d52012-03-08 16:16:34 +02002061 raddr = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002062 + memory_region_section_addr(section, addr);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002063 }
aliguori6d16c2f2009-01-22 16:59:11 +00002064
2065 len -= l;
2066 addr += l;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002067 todo += l;
aliguori6d16c2f2009-01-22 16:59:11 +00002068 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002069 rlen = todo;
2070 ret = qemu_ram_ptr_length(raddr, &rlen);
2071 *plen = rlen;
2072 return ret;
aliguori6d16c2f2009-01-22 16:59:11 +00002073}
2074
Avi Kivityac1970f2012-10-03 16:22:53 +02002075/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002076 * Will also mark the memory as dirty if is_write == 1. access_len gives
2077 * the amount of memory that was actually read or written by the caller.
2078 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002079void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2080 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002081{
2082 if (buffer != bounce.buffer) {
2083 if (is_write) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03002084 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002085 while (access_len) {
2086 unsigned l;
2087 l = TARGET_PAGE_SIZE;
2088 if (l > access_len)
2089 l = access_len;
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002090 invalidate_and_set_dirty(addr1, l);
aliguori6d16c2f2009-01-22 16:59:11 +00002091 addr1 += l;
2092 access_len -= l;
2093 }
2094 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002095 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002096 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002097 }
aliguori6d16c2f2009-01-22 16:59:11 +00002098 return;
2099 }
2100 if (is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002101 address_space_write(as, bounce.addr, bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002102 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002103 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002104 bounce.buffer = NULL;
aliguoriba223c22009-01-22 16:59:16 +00002105 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002106}
bellardd0ecd2a2006-04-23 17:14:48 +00002107
Avi Kivitya8170e52012-10-23 12:30:10 +02002108void *cpu_physical_memory_map(hwaddr addr,
2109 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002110 int is_write)
2111{
2112 return address_space_map(&address_space_memory, addr, plen, is_write);
2113}
2114
Avi Kivitya8170e52012-10-23 12:30:10 +02002115void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2116 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002117{
2118 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2119}
2120
bellard8df1cd02005-01-28 22:37:22 +00002121/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002122static inline uint32_t ldl_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002123 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002124{
bellard8df1cd02005-01-28 22:37:22 +00002125 uint8_t *ptr;
2126 uint32_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02002127 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00002128
Avi Kivityac1970f2012-10-03 16:22:53 +02002129 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002130
Blue Swirlcc5bea62012-04-14 14:56:48 +00002131 if (!(memory_region_is_ram(section->mr) ||
2132 memory_region_is_romd(section->mr))) {
bellard8df1cd02005-01-28 22:37:22 +00002133 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00002134 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02002135 val = io_mem_read(section->mr, addr, 4);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002136#if defined(TARGET_WORDS_BIGENDIAN)
2137 if (endian == DEVICE_LITTLE_ENDIAN) {
2138 val = bswap32(val);
2139 }
2140#else
2141 if (endian == DEVICE_BIG_ENDIAN) {
2142 val = bswap32(val);
2143 }
2144#endif
bellard8df1cd02005-01-28 22:37:22 +00002145 } else {
2146 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02002147 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002148 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002149 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002150 switch (endian) {
2151 case DEVICE_LITTLE_ENDIAN:
2152 val = ldl_le_p(ptr);
2153 break;
2154 case DEVICE_BIG_ENDIAN:
2155 val = ldl_be_p(ptr);
2156 break;
2157 default:
2158 val = ldl_p(ptr);
2159 break;
2160 }
bellard8df1cd02005-01-28 22:37:22 +00002161 }
2162 return val;
2163}
2164
Avi Kivitya8170e52012-10-23 12:30:10 +02002165uint32_t ldl_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002166{
2167 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2168}
2169
Avi Kivitya8170e52012-10-23 12:30:10 +02002170uint32_t ldl_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002171{
2172 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2173}
2174
Avi Kivitya8170e52012-10-23 12:30:10 +02002175uint32_t ldl_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002176{
2177 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2178}
2179
bellard84b7b8e2005-11-28 21:19:04 +00002180/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002181static inline uint64_t ldq_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002182 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00002183{
bellard84b7b8e2005-11-28 21:19:04 +00002184 uint8_t *ptr;
2185 uint64_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02002186 MemoryRegionSection *section;
bellard84b7b8e2005-11-28 21:19:04 +00002187
Avi Kivityac1970f2012-10-03 16:22:53 +02002188 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002189
Blue Swirlcc5bea62012-04-14 14:56:48 +00002190 if (!(memory_region_is_ram(section->mr) ||
2191 memory_region_is_romd(section->mr))) {
bellard84b7b8e2005-11-28 21:19:04 +00002192 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00002193 addr = memory_region_section_addr(section, addr);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002194
2195 /* XXX This is broken when device endian != cpu endian.
2196 Fix and add "endian" variable check */
bellard84b7b8e2005-11-28 21:19:04 +00002197#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivity37ec01d2012-03-08 18:08:35 +02002198 val = io_mem_read(section->mr, addr, 4) << 32;
2199 val |= io_mem_read(section->mr, addr + 4, 4);
bellard84b7b8e2005-11-28 21:19:04 +00002200#else
Avi Kivity37ec01d2012-03-08 18:08:35 +02002201 val = io_mem_read(section->mr, addr, 4);
2202 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
bellard84b7b8e2005-11-28 21:19:04 +00002203#endif
2204 } else {
2205 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02002206 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002207 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002208 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002209 switch (endian) {
2210 case DEVICE_LITTLE_ENDIAN:
2211 val = ldq_le_p(ptr);
2212 break;
2213 case DEVICE_BIG_ENDIAN:
2214 val = ldq_be_p(ptr);
2215 break;
2216 default:
2217 val = ldq_p(ptr);
2218 break;
2219 }
bellard84b7b8e2005-11-28 21:19:04 +00002220 }
2221 return val;
2222}
2223
Avi Kivitya8170e52012-10-23 12:30:10 +02002224uint64_t ldq_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002225{
2226 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2227}
2228
Avi Kivitya8170e52012-10-23 12:30:10 +02002229uint64_t ldq_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002230{
2231 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2232}
2233
Avi Kivitya8170e52012-10-23 12:30:10 +02002234uint64_t ldq_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002235{
2236 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2237}
2238
bellardaab33092005-10-30 20:48:42 +00002239/* XXX: optimize */
Avi Kivitya8170e52012-10-23 12:30:10 +02002240uint32_t ldub_phys(hwaddr addr)
bellardaab33092005-10-30 20:48:42 +00002241{
2242 uint8_t val;
2243 cpu_physical_memory_read(addr, &val, 1);
2244 return val;
2245}
2246
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002247/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002248static inline uint32_t lduw_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002249 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00002250{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002251 uint8_t *ptr;
2252 uint64_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02002253 MemoryRegionSection *section;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002254
Avi Kivityac1970f2012-10-03 16:22:53 +02002255 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002256
Blue Swirlcc5bea62012-04-14 14:56:48 +00002257 if (!(memory_region_is_ram(section->mr) ||
2258 memory_region_is_romd(section->mr))) {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002259 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00002260 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02002261 val = io_mem_read(section->mr, addr, 2);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002262#if defined(TARGET_WORDS_BIGENDIAN)
2263 if (endian == DEVICE_LITTLE_ENDIAN) {
2264 val = bswap16(val);
2265 }
2266#else
2267 if (endian == DEVICE_BIG_ENDIAN) {
2268 val = bswap16(val);
2269 }
2270#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002271 } else {
2272 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02002273 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002274 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002275 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002276 switch (endian) {
2277 case DEVICE_LITTLE_ENDIAN:
2278 val = lduw_le_p(ptr);
2279 break;
2280 case DEVICE_BIG_ENDIAN:
2281 val = lduw_be_p(ptr);
2282 break;
2283 default:
2284 val = lduw_p(ptr);
2285 break;
2286 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002287 }
2288 return val;
bellardaab33092005-10-30 20:48:42 +00002289}
2290
Avi Kivitya8170e52012-10-23 12:30:10 +02002291uint32_t lduw_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002292{
2293 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2294}
2295
Avi Kivitya8170e52012-10-23 12:30:10 +02002296uint32_t lduw_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002297{
2298 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2299}
2300
Avi Kivitya8170e52012-10-23 12:30:10 +02002301uint32_t lduw_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002302{
2303 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2304}
2305
bellard8df1cd02005-01-28 22:37:22 +00002306/* warning: addr must be aligned. The ram page is not masked as dirty
2307 and the code inside is not invalidated. It is useful if the dirty
2308 bits are used to track modified PTEs */
Avi Kivitya8170e52012-10-23 12:30:10 +02002309void stl_phys_notdirty(hwaddr addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00002310{
bellard8df1cd02005-01-28 22:37:22 +00002311 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02002312 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00002313
Avi Kivityac1970f2012-10-03 16:22:53 +02002314 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002315
Avi Kivityf3705d52012-03-08 16:16:34 +02002316 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00002317 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02002318 if (memory_region_is_ram(section->mr)) {
2319 section = &phys_sections[phys_section_rom];
2320 }
2321 io_mem_write(section->mr, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00002322 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02002323 unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002324 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002325 + memory_region_section_addr(section, addr);
pbrook5579c7f2009-04-11 14:47:08 +00002326 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00002327 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00002328
2329 if (unlikely(in_migration)) {
2330 if (!cpu_physical_memory_is_dirty(addr1)) {
2331 /* invalidate code */
2332 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2333 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002334 cpu_physical_memory_set_dirty_flags(
2335 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori74576192008-10-06 14:02:03 +00002336 }
2337 }
bellard8df1cd02005-01-28 22:37:22 +00002338 }
2339}
2340
Avi Kivitya8170e52012-10-23 12:30:10 +02002341void stq_phys_notdirty(hwaddr addr, uint64_t val)
j_mayerbc98a7e2007-04-04 07:55:12 +00002342{
j_mayerbc98a7e2007-04-04 07:55:12 +00002343 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02002344 MemoryRegionSection *section;
j_mayerbc98a7e2007-04-04 07:55:12 +00002345
Avi Kivityac1970f2012-10-03 16:22:53 +02002346 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002347
Avi Kivityf3705d52012-03-08 16:16:34 +02002348 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00002349 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02002350 if (memory_region_is_ram(section->mr)) {
2351 section = &phys_sections[phys_section_rom];
2352 }
j_mayerbc98a7e2007-04-04 07:55:12 +00002353#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivity37ec01d2012-03-08 18:08:35 +02002354 io_mem_write(section->mr, addr, val >> 32, 4);
2355 io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
j_mayerbc98a7e2007-04-04 07:55:12 +00002356#else
Avi Kivity37ec01d2012-03-08 18:08:35 +02002357 io_mem_write(section->mr, addr, (uint32_t)val, 4);
2358 io_mem_write(section->mr, addr + 4, val >> 32, 4);
j_mayerbc98a7e2007-04-04 07:55:12 +00002359#endif
2360 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02002361 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002362 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002363 + memory_region_section_addr(section, addr));
j_mayerbc98a7e2007-04-04 07:55:12 +00002364 stq_p(ptr, val);
2365 }
2366}
2367
bellard8df1cd02005-01-28 22:37:22 +00002368/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002369static inline void stl_phys_internal(hwaddr addr, uint32_t val,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002370 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002371{
bellard8df1cd02005-01-28 22:37:22 +00002372 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02002373 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00002374
Avi Kivityac1970f2012-10-03 16:22:53 +02002375 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002376
Avi Kivityf3705d52012-03-08 16:16:34 +02002377 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00002378 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02002379 if (memory_region_is_ram(section->mr)) {
2380 section = &phys_sections[phys_section_rom];
2381 }
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002382#if defined(TARGET_WORDS_BIGENDIAN)
2383 if (endian == DEVICE_LITTLE_ENDIAN) {
2384 val = bswap32(val);
2385 }
2386#else
2387 if (endian == DEVICE_BIG_ENDIAN) {
2388 val = bswap32(val);
2389 }
2390#endif
Avi Kivity37ec01d2012-03-08 18:08:35 +02002391 io_mem_write(section->mr, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00002392 } else {
2393 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02002394 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002395 + memory_region_section_addr(section, addr);
bellard8df1cd02005-01-28 22:37:22 +00002396 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002397 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002398 switch (endian) {
2399 case DEVICE_LITTLE_ENDIAN:
2400 stl_le_p(ptr, val);
2401 break;
2402 case DEVICE_BIG_ENDIAN:
2403 stl_be_p(ptr, val);
2404 break;
2405 default:
2406 stl_p(ptr, val);
2407 break;
2408 }
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002409 invalidate_and_set_dirty(addr1, 4);
bellard8df1cd02005-01-28 22:37:22 +00002410 }
2411}
2412
Avi Kivitya8170e52012-10-23 12:30:10 +02002413void stl_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002414{
2415 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2416}
2417
Avi Kivitya8170e52012-10-23 12:30:10 +02002418void stl_le_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002419{
2420 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2421}
2422
Avi Kivitya8170e52012-10-23 12:30:10 +02002423void stl_be_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002424{
2425 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2426}
2427
bellardaab33092005-10-30 20:48:42 +00002428/* XXX: optimize */
Avi Kivitya8170e52012-10-23 12:30:10 +02002429void stb_phys(hwaddr addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00002430{
2431 uint8_t v = val;
2432 cpu_physical_memory_write(addr, &v, 1);
2433}
2434
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002435/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002436static inline void stw_phys_internal(hwaddr addr, uint32_t val,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002437 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00002438{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002439 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02002440 MemoryRegionSection *section;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002441
Avi Kivityac1970f2012-10-03 16:22:53 +02002442 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002443
Avi Kivityf3705d52012-03-08 16:16:34 +02002444 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00002445 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02002446 if (memory_region_is_ram(section->mr)) {
2447 section = &phys_sections[phys_section_rom];
2448 }
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002449#if defined(TARGET_WORDS_BIGENDIAN)
2450 if (endian == DEVICE_LITTLE_ENDIAN) {
2451 val = bswap16(val);
2452 }
2453#else
2454 if (endian == DEVICE_BIG_ENDIAN) {
2455 val = bswap16(val);
2456 }
2457#endif
Avi Kivity37ec01d2012-03-08 18:08:35 +02002458 io_mem_write(section->mr, addr, val, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002459 } else {
2460 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02002461 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002462 + memory_region_section_addr(section, addr);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002463 /* RAM case */
2464 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002465 switch (endian) {
2466 case DEVICE_LITTLE_ENDIAN:
2467 stw_le_p(ptr, val);
2468 break;
2469 case DEVICE_BIG_ENDIAN:
2470 stw_be_p(ptr, val);
2471 break;
2472 default:
2473 stw_p(ptr, val);
2474 break;
2475 }
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002476 invalidate_and_set_dirty(addr1, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002477 }
bellardaab33092005-10-30 20:48:42 +00002478}
2479
Avi Kivitya8170e52012-10-23 12:30:10 +02002480void stw_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002481{
2482 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2483}
2484
Avi Kivitya8170e52012-10-23 12:30:10 +02002485void stw_le_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002486{
2487 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2488}
2489
Avi Kivitya8170e52012-10-23 12:30:10 +02002490void stw_be_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002491{
2492 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2493}
2494
bellardaab33092005-10-30 20:48:42 +00002495/* XXX: optimize */
Avi Kivitya8170e52012-10-23 12:30:10 +02002496void stq_phys(hwaddr addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00002497{
2498 val = tswap64(val);
Stefan Weil71d2b722011-03-26 21:06:56 +01002499 cpu_physical_memory_write(addr, &val, 8);
bellardaab33092005-10-30 20:48:42 +00002500}
2501
Avi Kivitya8170e52012-10-23 12:30:10 +02002502void stq_le_phys(hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002503{
2504 val = cpu_to_le64(val);
2505 cpu_physical_memory_write(addr, &val, 8);
2506}
2507
Avi Kivitya8170e52012-10-23 12:30:10 +02002508void stq_be_phys(hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002509{
2510 val = cpu_to_be64(val);
2511 cpu_physical_memory_write(addr, &val, 8);
2512}
2513
aliguori5e2972f2009-03-28 17:51:36 +00002514/* virtual memory access for debug (includes writing to ROM) */
Andreas Färber9349b4f2012-03-14 01:38:32 +01002515int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00002516 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002517{
2518 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02002519 hwaddr phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00002520 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00002521
2522 while (len > 0) {
2523 page = addr & TARGET_PAGE_MASK;
2524 phys_addr = cpu_get_phys_page_debug(env, page);
2525 /* if no physical page mapped, return an error */
2526 if (phys_addr == -1)
2527 return -1;
2528 l = (page + TARGET_PAGE_SIZE) - addr;
2529 if (l > len)
2530 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00002531 phys_addr += (addr & ~TARGET_PAGE_MASK);
aliguori5e2972f2009-03-28 17:51:36 +00002532 if (is_write)
2533 cpu_physical_memory_write_rom(phys_addr, buf, l);
2534 else
aliguori5e2972f2009-03-28 17:51:36 +00002535 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
bellard13eb76e2004-01-24 15:23:36 +00002536 len -= l;
2537 buf += l;
2538 addr += l;
2539 }
2540 return 0;
2541}
Paul Brooka68fe892010-03-01 00:08:59 +00002542#endif
bellard13eb76e2004-01-24 15:23:36 +00002543
Paul Brookb3755a92010-03-12 16:54:58 +00002544#if !defined(CONFIG_USER_ONLY)
2545
Benjamin Herrenschmidt82afa582012-01-10 01:35:11 +00002546/*
2547 * A helper function for the _utterly broken_ virtio device model to find out if
2548 * it's running on a big endian machine. Don't do this at home kids!
2549 */
2550bool virtio_is_big_endian(void);
2551bool virtio_is_big_endian(void)
2552{
2553#if defined(TARGET_WORDS_BIGENDIAN)
2554 return true;
2555#else
2556 return false;
2557#endif
2558}
2559
bellard61382a52003-10-27 21:22:23 +00002560#endif
Wen Congyang76f35532012-05-07 12:04:18 +08002561
2562#ifndef CONFIG_USER_ONLY
Avi Kivitya8170e52012-10-23 12:30:10 +02002563bool cpu_physical_memory_is_io(hwaddr phys_addr)
Wen Congyang76f35532012-05-07 12:04:18 +08002564{
2565 MemoryRegionSection *section;
2566
Avi Kivityac1970f2012-10-03 16:22:53 +02002567 section = phys_page_find(address_space_memory.dispatch,
2568 phys_addr >> TARGET_PAGE_BITS);
Wen Congyang76f35532012-05-07 12:04:18 +08002569
2570 return !(memory_region_is_ram(section->mr) ||
2571 memory_region_is_romd(section->mr));
2572}
2573#endif