/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "hw/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_UNASSIGNED
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;
DMAContext dma_context_memory;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

#if !defined(CONFIG_USER_ONLY)

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
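
/* Note (sketch): ((uint16_t)~0) >> 1 evaluates to 0x7fff, which is intended
 * to be the largest value the PhysPageEntry ptr field can hold, so the NIL
 * marker can never collide with a real node index handed out by
 * phys_map_node_alloc() below.  The pool itself lives in phys_map_nodes[]
 * and is grown on demand with g_renew() in phys_map_node_reserve().
 */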

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}


static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}
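
/* Example (sketch): callers in this file pair the two helpers above to
 * populate and query the multi-level page map.  To map one page at
 * guest-physical address "paddr" to the section index "sec" and look it up
 * again:
 *
 *     phys_page_set(d, paddr >> TARGET_PAGE_BITS, 1, sec);
 *     MemoryRegionSection *s = phys_page_find(d, paddr >> TARGET_PAGE_BITS);
 *
 * Pages that were never set resolve to phys_section_unassigned.
 */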

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUArchState *qemu_get_cpu(int cpu)
{
    CPUArchState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUArchState *env)
{
#ifndef CONFIG_USER_ONLY
    CPUState *cpu = ENV_GET_CPU(env);
#endif
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)

{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
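
/* Usage sketch (assuming "env" is a valid CPUArchState): watch a 4-byte,
 * naturally aligned guest virtual address for writes, roughly as the
 * gdbstub does:
 *
 *     CPUWatchpoint *wp;
 *     if (cpu_watchpoint_insert(env, addr, 4, BP_GDB | BP_MEM_WRITE, &wp)) {
 *         // length or alignment was rejected with -EINVAL
 *     }
 */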

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

void cpu_reset_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUArchState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);

}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}
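
/* Usage sketch: dirty-tracking consumers (migration, VGA) clear the flags
 * for a range once they have processed it, e.g. for one page:
 *
 *     cpu_physical_memory_reset_dirty(start, start + TARGET_PAGE_SIZE,
 *                                     MIGRATION_DIRTY_FLAG);
 *
 * The TLB pass above then makes the next guest store to that range take
 * the slow path and mark the page dirty again.
 */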

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, paddr);
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = section - phys_sections;
        iotlb += memory_region_section_addr(section, paddr);
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;
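
/* A subpage covers one target page whose contents are split between
 * several MemoryRegionSections (for example a small MMIO region that does
 * not fill a whole page).  sub_section[] holds one section index per byte
 * offset within the page, and SUBPAGE_IDX() extracts that offset, so a
 * lookup is roughly:
 *
 *     uint16_t sec = mmio->sub_section[SUBPAGE_IDX(addr)];
 *
 * (Sketch only; the real accessors are the subpage read/write callbacks
 * further down in this file.)
 */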

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(hwaddr base);
static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(AddressSpaceDispatch *d)
{
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}


static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    hwaddr addr;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    addr = start_addr;
    phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
    MemoryRegionSection now = *section, remain = *section;

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(d, &now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    while (remain.size >= TARGET_PAGE_SIZE) {
        now = remain;
        if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = TARGET_PAGE_SIZE;
            register_subpage(d, &now);
        } else {
            now.size &= TARGET_PAGE_MASK;
            register_multipage(d, &now);
        }
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(d, &now);
    }
}
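
/* Worked example (sketch, 4 KiB target pages, assuming offset_within_region
 * has the same sub-page alignment as offset_within_address_space): a
 * section starting at 0x800 with size 0x2c00 is split by mem_add() into
 *   - a subpage piece for [0x800, 0xfff] (unaligned head),
 *   - full pages [0x1000, 0x2fff] registered via register_multipage(),
 *   - a subpage piece for [0x3000, 0x33ff] (0x400-byte tail).
 */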

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    block->fd = fd;
    return area;
}
#endif
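
/* The hugetlbfs-backed allocator above is only used when the user passes
 * -mem-path (and, where MAP_POPULATE is available, -mem-prealloc), e.g.:
 *
 *     qemu-system-x86_64 -m 1024 -mem-path /dev/hugepages
 *
 * If the mapping cannot be set up, qemu_ram_alloc_from_ptr() below falls
 * back to an ordinary qemu_vmalloc() allocation.
 */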

static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
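
/* Worked example (sketch): with two existing blocks covering
 * [0, 0x40000000) and [0x80000000, 0x80200000), a request for 0x100000
 * bytes looks at the gap after each block and keeps the smallest gap that
 * still fits - here the new block is placed at offset 0x40000000 rather
 * than after the last block.
 */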

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_vmalloc(size);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}
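
/* Usage sketch (assuming "mr" is a MemoryRegion of "size" bytes that the
 * caller is initializing, as memory_region_init_ram() does in memory.c):
 *
 *     ram_addr_t offset = qemu_ram_alloc(size, mr);
 *     void *host = qemu_get_ram_ptr(offset);
 *
 * The returned offset is a position in the global ram_list address space,
 * not a guest-physical address; the guest only sees the memory once "mr"
 * is mapped into an address space with memory_region_add_subregion().
 */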

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();

}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

pbrookdc828ca2009-04-09 22:21:07 +00001221/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00001222 With the exception of the softmmu code in this file, this should
1223 only be used for local memory (e.g. video ram) that the device owns,
1224 and knows it isn't going to access beyond the end of the block.
1225
1226 It should not be used for general purpose DMA.
1227 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1228 */
Anthony Liguoric227f092009-10-01 16:12:16 -05001229void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00001230{
pbrook94a6b542009-04-11 17:15:54 +00001231 RAMBlock *block;
1232
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001233 /* The list is protected by the iothread lock here. */
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001234 block = ram_list.mru_block;
1235 if (block && addr - block->offset < block->length) {
1236 goto found;
1237 }
Paolo Bonzinia3161032012-11-14 15:54:48 +01001238 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Alex Williamsonf471a172010-06-11 11:11:42 -06001239 if (addr - block->offset < block->length) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001240 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001241 }
pbrook94a6b542009-04-11 17:15:54 +00001242 }
Alex Williamsonf471a172010-06-11 11:11:42 -06001243
1244 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1245 abort();
1246
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001247found:
1248 ram_list.mru_block = block;
1249 if (xen_enabled()) {
1250 /* We need to check if the requested address is in the RAM
1251 * because we don't want to map the entire memory in QEMU.
1252 * In that case just map until the end of the page.
1253 */
1254 if (block->offset == 0) {
1255 return xen_map_cache(addr, 0, 0);
1256 } else if (block->host == NULL) {
1257 block->host =
1258 xen_map_cache(block->offset, block->length, 1);
1259 }
1260 }
1261 return block->host + (addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00001262}
1263
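/* Editor's sketch (not from the original source): typical use of the helper
 * above by a device model that owns its RAM block, e.g. video RAM.  The
 * names `vram_base`, `offset` and `pixel` are hypothetical.
 *
 *     uint8_t *vram = qemu_get_ram_ptr(vram_base);
 *     vram[offset] = pixel;
 *     qemu_put_ram_ptr(vram);
 *
 * Guest-initiated DMA must instead use cpu_physical_memory_rw() or the
 * address_space_map()/address_space_unmap() pair defined further down.
 */
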
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001264/* Return a host pointer to ram allocated with qemu_ram_alloc. Same as
1265 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
1266 *
1267 * ??? Is this still necessary?
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001268 */
Blue Swirl8b9c99d2012-10-28 11:04:51 +00001269static void *qemu_safe_ram_ptr(ram_addr_t addr)
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001270{
1271 RAMBlock *block;
1272
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001273 /* The list is protected by the iothread lock here. */
Paolo Bonzinia3161032012-11-14 15:54:48 +01001274 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001275 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02001276 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001277 /* We need to check if the requested address is in the RAM
1278 * because we don't want to map the entire memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001279 * In that case just map until the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01001280 */
1281 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001282 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01001283 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001284 block->host =
1285 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01001286 }
1287 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001288 return block->host + (addr - block->offset);
1289 }
1290 }
1291
1292 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1293 abort();
1294
1295 return NULL;
1296}
1297
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001298/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1299 * but takes a size argument */
Blue Swirl8b9c99d2012-10-28 11:04:51 +00001300static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001301{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001302 if (*size == 0) {
1303 return NULL;
1304 }
Jan Kiszka868bb332011-06-21 22:59:09 +02001305 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001306 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02001307 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001308 RAMBlock *block;
1309
Paolo Bonzinia3161032012-11-14 15:54:48 +01001310 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001311 if (addr - block->offset < block->length) {
1312 if (addr - block->offset + *size > block->length)
1313 *size = block->length - addr + block->offset;
1314 return block->host + (addr - block->offset);
1315 }
1316 }
1317
1318 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1319 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001320 }
1321}
1322
Anthony PERARD050a0dd2010-09-16 13:57:49 +01001323void qemu_put_ram_ptr(void *addr)
1324{
1325 trace_qemu_put_ram_ptr(addr);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01001326}
1327
Marcelo Tosattie8902612010-10-11 15:31:19 -03001328int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00001329{
pbrook94a6b542009-04-11 17:15:54 +00001330 RAMBlock *block;
1331 uint8_t *host = ptr;
1332
Jan Kiszka868bb332011-06-21 22:59:09 +02001333 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001334 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001335 return 0;
1336 }
1337
Paolo Bonzinia3161032012-11-14 15:54:48 +01001338 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001339 /* This case happens when the block is not mapped. */
1340 if (block->host == NULL) {
1341 continue;
1342 }
Alex Williamsonf471a172010-06-11 11:11:42 -06001343 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03001344 *ram_addr = block->offset + (host - block->host);
1345 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06001346 }
pbrook94a6b542009-04-11 17:15:54 +00001347 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001348
Marcelo Tosattie8902612010-10-11 15:31:19 -03001349 return -1;
1350}
Alex Williamsonf471a172010-06-11 11:11:42 -06001351
Marcelo Tosattie8902612010-10-11 15:31:19 -03001352/* Some of the softmmu routines need to translate from a host pointer
1353 (typically a TLB entry) back to a ram offset. */
1354ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
1355{
1356 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06001357
Marcelo Tosattie8902612010-10-11 15:31:19 -03001358 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
1359 fprintf(stderr, "Bad ram pointer %p\n", ptr);
1360 abort();
1361 }
1362 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00001363}
1364
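/* Editor's sketch (assumption, not original code): the round trip between a
 * guest ram_addr_t and the host pointer used by the softmmu helpers.
 *
 *     void *host = qemu_get_ram_ptr(ram_addr);
 *     assert(qemu_ram_addr_from_host_nofail(host) == ram_addr);
 *
 * qemu_ram_addr_from_host() is the non-aborting variant; it returns -1 when
 * the pointer does not belong to any registered RAMBlock.
 */
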
Avi Kivitya8170e52012-10-23 12:30:10 +02001365static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001366 unsigned size)
bellard33417e72003-08-10 21:47:01 +00001367{
pbrook67d3b952006-12-18 05:03:52 +00001368#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00001369 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00001370#endif
Richard Henderson5b450402011-04-18 16:13:12 -07001371#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001372 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00001373#endif
1374 return 0;
1375}
1376
Avi Kivitya8170e52012-10-23 12:30:10 +02001377static void unassigned_mem_write(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001378 uint64_t val, unsigned size)
blueswir1e18231a2008-10-06 18:46:28 +00001379{
1380#ifdef DEBUG_UNASSIGNED
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001381 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
blueswir1e18231a2008-10-06 18:46:28 +00001382#endif
Richard Henderson5b450402011-04-18 16:13:12 -07001383#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001384 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00001385#endif
1386}
1387
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001388static const MemoryRegionOps unassigned_mem_ops = {
1389 .read = unassigned_mem_read,
1390 .write = unassigned_mem_write,
1391 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00001392};
1393
Avi Kivitya8170e52012-10-23 12:30:10 +02001394static uint64_t error_mem_read(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001395 unsigned size)
1396{
1397 abort();
1398}
1399
Avi Kivitya8170e52012-10-23 12:30:10 +02001400static void error_mem_write(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001401 uint64_t value, unsigned size)
1402{
1403 abort();
1404}
1405
1406static const MemoryRegionOps error_mem_ops = {
1407 .read = error_mem_read,
1408 .write = error_mem_write,
1409 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00001410};
1411
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001412static const MemoryRegionOps rom_mem_ops = {
1413 .read = error_mem_read,
1414 .write = unassigned_mem_write,
1415 .endianness = DEVICE_NATIVE_ENDIAN,
1416};
1417
Avi Kivitya8170e52012-10-23 12:30:10 +02001418static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001419 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001420{
bellard3a7d9292005-08-21 09:26:42 +00001421 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001422 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00001423 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1424#if !defined(CONFIG_USER_ONLY)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001425 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001426 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00001427#endif
1428 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001429 switch (size) {
1430 case 1:
1431 stb_p(qemu_get_ram_ptr(ram_addr), val);
1432 break;
1433 case 2:
1434 stw_p(qemu_get_ram_ptr(ram_addr), val);
1435 break;
1436 case 4:
1437 stl_p(qemu_get_ram_ptr(ram_addr), val);
1438 break;
1439 default:
1440 abort();
1441 }
bellardf23db162005-08-21 19:12:28 +00001442 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001443 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00001444 /* we remove the notdirty callback only if the code has been
1445 flushed */
1446 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00001447 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00001448}
1449
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001450static const MemoryRegionOps notdirty_mem_ops = {
1451 .read = error_mem_read,
1452 .write = notdirty_mem_write,
1453 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00001454};
1455
pbrook0f459d12008-06-09 00:20:13 +00001456/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00001457static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00001458{
Andreas Färber9349b4f2012-03-14 01:38:32 +01001459 CPUArchState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00001460 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00001461 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00001462 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00001463 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00001464
aliguori06d55cc2008-11-18 20:24:06 +00001465 if (env->watchpoint_hit) {
1466 /* We re-entered the check after replacing the TB. Now raise
 1467 * the debug interrupt so that it will trigger after the
1468 * current instruction. */
1469 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
1470 return;
1471 }
pbrook2e70f6e2008-06-29 01:03:05 +00001472 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00001473 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001474 if ((vaddr == (wp->vaddr & len_mask) ||
1475 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00001476 wp->flags |= BP_WATCHPOINT_HIT;
1477 if (!env->watchpoint_hit) {
1478 env->watchpoint_hit = wp;
Blue Swirl5a316522012-12-02 21:28:09 +00001479 tb_check_watchpoint(env);
aliguori6e140f22008-11-18 20:37:55 +00001480 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1481 env->exception_index = EXCP_DEBUG;
Max Filippov488d6572012-01-29 02:24:39 +04001482 cpu_loop_exit(env);
aliguori6e140f22008-11-18 20:37:55 +00001483 } else {
1484 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1485 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
Max Filippov488d6572012-01-29 02:24:39 +04001486 cpu_resume_from_signal(env, NULL);
aliguori6e140f22008-11-18 20:37:55 +00001487 }
aliguori06d55cc2008-11-18 20:24:06 +00001488 }
aliguori6e140f22008-11-18 20:37:55 +00001489 } else {
1490 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00001491 }
1492 }
1493}
1494
pbrook6658ffb2007-03-16 23:58:11 +00001495/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1496 so these check for a hit then pass through to the normal out-of-line
1497 phys routines. */
Avi Kivitya8170e52012-10-23 12:30:10 +02001498static uint64_t watch_mem_read(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001499 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001500{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001501 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1502 switch (size) {
1503 case 1: return ldub_phys(addr);
1504 case 2: return lduw_phys(addr);
1505 case 4: return ldl_phys(addr);
1506 default: abort();
1507 }
pbrook6658ffb2007-03-16 23:58:11 +00001508}
1509
Avi Kivitya8170e52012-10-23 12:30:10 +02001510static void watch_mem_write(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001511 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001512{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001513 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1514 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04001515 case 1:
1516 stb_phys(addr, val);
1517 break;
1518 case 2:
1519 stw_phys(addr, val);
1520 break;
1521 case 4:
1522 stl_phys(addr, val);
1523 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02001524 default: abort();
1525 }
pbrook6658ffb2007-03-16 23:58:11 +00001526}
1527
Avi Kivity1ec9b902012-01-02 12:47:48 +02001528static const MemoryRegionOps watch_mem_ops = {
1529 .read = watch_mem_read,
1530 .write = watch_mem_write,
1531 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00001532};
pbrook6658ffb2007-03-16 23:58:11 +00001533
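/* Editor's sketch (illustrative, not original code): the routines above are
 * reached after a debugger front end inserts a watchpoint, e.g.
 *
 *     cpu_watchpoint_insert(env, vaddr, 4, BP_MEM_WRITE, NULL);
 *
 * after which the TLB entry covering that page is redirected to
 * io_mem_watch, so every access runs check_watchpoint() before being
 * forwarded to the normal ld*_phys/st*_phys helpers.
 */
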
Avi Kivitya8170e52012-10-23 12:30:10 +02001534static uint64_t subpage_read(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001535 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001536{
Avi Kivity70c68e42012-01-02 12:32:48 +02001537 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07001538 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02001539 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00001540#if defined(DEBUG_SUBPAGE)
1541 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
1542 mmio, len, addr, idx);
1543#endif
blueswir1db7b5422007-05-26 17:36:03 +00001544
Avi Kivity5312bd82012-02-12 18:32:55 +02001545 section = &phys_sections[mmio->sub_section[idx]];
1546 addr += mmio->base;
1547 addr -= section->offset_within_address_space;
1548 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02001549 return io_mem_read(section->mr, addr, len);
blueswir1db7b5422007-05-26 17:36:03 +00001550}
1551
Avi Kivitya8170e52012-10-23 12:30:10 +02001552static void subpage_write(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001553 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001554{
Avi Kivity70c68e42012-01-02 12:32:48 +02001555 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07001556 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02001557 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00001558#if defined(DEBUG_SUBPAGE)
Avi Kivity70c68e42012-01-02 12:32:48 +02001559 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
1560 " idx %d value %"PRIx64"\n",
Richard Hendersonf6405242010-04-22 16:47:31 -07001561 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00001562#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07001563
Avi Kivity5312bd82012-02-12 18:32:55 +02001564 section = &phys_sections[mmio->sub_section[idx]];
1565 addr += mmio->base;
1566 addr -= section->offset_within_address_space;
1567 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02001568 io_mem_write(section->mr, addr, value, len);
blueswir1db7b5422007-05-26 17:36:03 +00001569}
1570
Avi Kivity70c68e42012-01-02 12:32:48 +02001571static const MemoryRegionOps subpage_ops = {
1572 .read = subpage_read,
1573 .write = subpage_write,
1574 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00001575};
1576
Avi Kivitya8170e52012-10-23 12:30:10 +02001577static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
Avi Kivityde712f92012-01-02 12:41:07 +02001578 unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01001579{
1580 ram_addr_t raddr = addr;
1581 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02001582 switch (size) {
1583 case 1: return ldub_p(ptr);
1584 case 2: return lduw_p(ptr);
1585 case 4: return ldl_p(ptr);
1586 default: abort();
1587 }
Andreas Färber56384e82011-11-30 16:26:21 +01001588}
1589
Avi Kivitya8170e52012-10-23 12:30:10 +02001590static void subpage_ram_write(void *opaque, hwaddr addr,
Avi Kivityde712f92012-01-02 12:41:07 +02001591 uint64_t value, unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01001592{
1593 ram_addr_t raddr = addr;
1594 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02001595 switch (size) {
1596 case 1: return stb_p(ptr, value);
1597 case 2: return stw_p(ptr, value);
1598 case 4: return stl_p(ptr, value);
1599 default: abort();
1600 }
Andreas Färber56384e82011-11-30 16:26:21 +01001601}
1602
Avi Kivityde712f92012-01-02 12:41:07 +02001603static const MemoryRegionOps subpage_ram_ops = {
1604 .read = subpage_ram_read,
1605 .write = subpage_ram_write,
1606 .endianness = DEVICE_NATIVE_ENDIAN,
Andreas Färber56384e82011-11-30 16:26:21 +01001607};
1608
Anthony Liguoric227f092009-10-01 16:12:16 -05001609static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02001610 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00001611{
1612 int idx, eidx;
1613
1614 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1615 return -1;
1616 idx = SUBPAGE_IDX(start);
1617 eidx = SUBPAGE_IDX(end);
1618#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00001619 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
blueswir1db7b5422007-05-26 17:36:03 +00001620 __func__, mmio, start, end, idx, eidx, section);
1621#endif
Avi Kivity5312bd82012-02-12 18:32:55 +02001622 if (memory_region_is_ram(phys_sections[section].mr)) {
1623 MemoryRegionSection new_section = phys_sections[section];
1624 new_section.mr = &io_mem_subpage_ram;
1625 section = phys_section_add(&new_section);
Andreas Färber56384e82011-11-30 16:26:21 +01001626 }
blueswir1db7b5422007-05-26 17:36:03 +00001627 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02001628 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00001629 }
1630
1631 return 0;
1632}
1633
Avi Kivitya8170e52012-10-23 12:30:10 +02001634static subpage_t *subpage_init(hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00001635{
Anthony Liguoric227f092009-10-01 16:12:16 -05001636 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00001637
Anthony Liguori7267c092011-08-20 22:09:37 -05001638 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00001639
1640 mmio->base = base;
Avi Kivity70c68e42012-01-02 12:32:48 +02001641 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
1642 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02001643 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00001644#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00001645 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
 1646 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00001647#endif
Avi Kivity0f0cb162012-02-13 17:14:32 +02001648 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
blueswir1db7b5422007-05-26 17:36:03 +00001649
1650 return mmio;
1651}
1652
Avi Kivity5312bd82012-02-12 18:32:55 +02001653static uint16_t dummy_section(MemoryRegion *mr)
1654{
1655 MemoryRegionSection section = {
1656 .mr = mr,
1657 .offset_within_address_space = 0,
1658 .offset_within_region = 0,
1659 .size = UINT64_MAX,
1660 };
1661
1662 return phys_section_add(&section);
1663}
1664
Avi Kivitya8170e52012-10-23 12:30:10 +02001665MemoryRegion *iotlb_to_region(hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02001666{
Avi Kivity37ec01d2012-03-08 18:08:35 +02001667 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02001668}
1669
Avi Kivitye9179ce2009-06-14 11:38:52 +03001670static void io_mem_init(void)
1671{
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001672 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001673 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
1674 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
1675 "unassigned", UINT64_MAX);
1676 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
1677 "notdirty", UINT64_MAX);
Avi Kivityde712f92012-01-02 12:41:07 +02001678 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
1679 "subpage-ram", UINT64_MAX);
Avi Kivity1ec9b902012-01-02 12:47:48 +02001680 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
1681 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03001682}
1683
Avi Kivityac1970f2012-10-03 16:22:53 +02001684static void mem_begin(MemoryListener *listener)
1685{
1686 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
1687
1688 destroy_all_mappings(d);
1689 d->phys_map.ptr = PHYS_MAP_NODE_NIL;
1690}
1691
Avi Kivity50c1e142012-02-08 21:36:02 +02001692static void core_begin(MemoryListener *listener)
1693{
Avi Kivity5312bd82012-02-12 18:32:55 +02001694 phys_sections_clear();
1695 phys_section_unassigned = dummy_section(&io_mem_unassigned);
Avi Kivityaa102232012-03-08 17:06:55 +02001696 phys_section_notdirty = dummy_section(&io_mem_notdirty);
1697 phys_section_rom = dummy_section(&io_mem_rom);
1698 phys_section_watch = dummy_section(&io_mem_watch);
Avi Kivity50c1e142012-02-08 21:36:02 +02001699}
1700
Avi Kivity1d711482012-10-02 18:54:45 +02001701static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02001702{
Andreas Färber9349b4f2012-03-14 01:38:32 +01001703 CPUArchState *env;
Avi Kivity117712c2012-02-12 21:23:17 +02001704
1705 /* since each CPU stores ram addresses in its TLB cache, we must
1706 reset the modified entries */
1707 /* XXX: slow ! */
1708 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1709 tlb_flush(env, 1);
1710 }
Avi Kivity50c1e142012-02-08 21:36:02 +02001711}
1712
Avi Kivity93632742012-02-08 16:54:16 +02001713static void core_log_global_start(MemoryListener *listener)
1714{
1715 cpu_physical_memory_set_dirty_tracking(1);
1716}
1717
1718static void core_log_global_stop(MemoryListener *listener)
1719{
1720 cpu_physical_memory_set_dirty_tracking(0);
1721}
1722
Avi Kivity4855d412012-02-08 21:16:05 +02001723static void io_region_add(MemoryListener *listener,
1724 MemoryRegionSection *section)
1725{
Avi Kivitya2d33522012-03-05 17:40:12 +02001726 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
1727
1728 mrio->mr = section->mr;
1729 mrio->offset = section->offset_within_region;
1730 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
Avi Kivity4855d412012-02-08 21:16:05 +02001731 section->offset_within_address_space, section->size);
Avi Kivitya2d33522012-03-05 17:40:12 +02001732 ioport_register(&mrio->iorange);
Avi Kivity4855d412012-02-08 21:16:05 +02001733}
1734
1735static void io_region_del(MemoryListener *listener,
1736 MemoryRegionSection *section)
1737{
1738 isa_unassign_ioport(section->offset_within_address_space, section->size);
1739}
1740
Avi Kivity93632742012-02-08 16:54:16 +02001741static MemoryListener core_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02001742 .begin = core_begin,
Avi Kivity93632742012-02-08 16:54:16 +02001743 .log_global_start = core_log_global_start,
1744 .log_global_stop = core_log_global_stop,
Avi Kivityac1970f2012-10-03 16:22:53 +02001745 .priority = 1,
Avi Kivity93632742012-02-08 16:54:16 +02001746};
1747
Avi Kivity4855d412012-02-08 21:16:05 +02001748static MemoryListener io_memory_listener = {
1749 .region_add = io_region_add,
1750 .region_del = io_region_del,
Avi Kivity4855d412012-02-08 21:16:05 +02001751 .priority = 0,
1752};
1753
Avi Kivity1d711482012-10-02 18:54:45 +02001754static MemoryListener tcg_memory_listener = {
1755 .commit = tcg_commit,
1756};
1757
Avi Kivityac1970f2012-10-03 16:22:53 +02001758void address_space_init_dispatch(AddressSpace *as)
1759{
1760 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1761
1762 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1763 d->listener = (MemoryListener) {
1764 .begin = mem_begin,
1765 .region_add = mem_add,
1766 .region_nop = mem_add,
1767 .priority = 0,
1768 };
1769 as->dispatch = d;
1770 memory_listener_register(&d->listener, as);
1771}
1772
Avi Kivity83f3c252012-10-07 12:59:55 +02001773void address_space_destroy_dispatch(AddressSpace *as)
1774{
1775 AddressSpaceDispatch *d = as->dispatch;
1776
1777 memory_listener_unregister(&d->listener);
1778 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
1779 g_free(d);
1780 as->dispatch = NULL;
1781}
1782
Avi Kivity62152b82011-07-26 14:26:14 +03001783static void memory_map_init(void)
1784{
Anthony Liguori7267c092011-08-20 22:09:37 -05001785 system_memory = g_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03001786 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity2673a5d2012-10-02 18:49:28 +02001787 address_space_init(&address_space_memory, system_memory);
1788 address_space_memory.name = "memory";
Avi Kivity309cb472011-08-08 16:09:03 +03001789
Anthony Liguori7267c092011-08-20 22:09:37 -05001790 system_io = g_malloc(sizeof(*system_io));
Avi Kivity309cb472011-08-08 16:09:03 +03001791 memory_region_init(system_io, "io", 65536);
Avi Kivity2673a5d2012-10-02 18:49:28 +02001792 address_space_init(&address_space_io, system_io);
1793 address_space_io.name = "I/O";
Avi Kivity93632742012-02-08 16:54:16 +02001794
Avi Kivityf6790af2012-10-02 20:13:51 +02001795 memory_listener_register(&core_memory_listener, &address_space_memory);
1796 memory_listener_register(&io_memory_listener, &address_space_io);
1797 memory_listener_register(&tcg_memory_listener, &address_space_memory);
Peter Maydell9e119082012-10-29 11:34:32 +10001798
1799 dma_context_init(&dma_context_memory, &address_space_memory,
1800 NULL, NULL, NULL);
Avi Kivity62152b82011-07-26 14:26:14 +03001801}
1802
1803MemoryRegion *get_system_memory(void)
1804{
1805 return system_memory;
1806}
1807
Avi Kivity309cb472011-08-08 16:09:03 +03001808MemoryRegion *get_system_io(void)
1809{
1810 return system_io;
1811}
1812
pbrooke2eef172008-06-08 01:09:01 +00001813#endif /* !defined(CONFIG_USER_ONLY) */
1814
bellard13eb76e2004-01-24 15:23:36 +00001815/* physical memory access (slow version, mainly for debug) */
1816#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01001817int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00001818 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00001819{
1820 int l, flags;
1821 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00001822 void * p;
bellard13eb76e2004-01-24 15:23:36 +00001823
1824 while (len > 0) {
1825 page = addr & TARGET_PAGE_MASK;
1826 l = (page + TARGET_PAGE_SIZE) - addr;
1827 if (l > len)
1828 l = len;
1829 flags = page_get_flags(page);
1830 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00001831 return -1;
bellard13eb76e2004-01-24 15:23:36 +00001832 if (is_write) {
1833 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00001834 return -1;
bellard579a97f2007-11-11 14:26:47 +00001835 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001836 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00001837 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001838 memcpy(p, buf, l);
1839 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00001840 } else {
1841 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00001842 return -1;
bellard579a97f2007-11-11 14:26:47 +00001843 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001844 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00001845 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001846 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00001847 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00001848 }
1849 len -= l;
1850 buf += l;
1851 addr += l;
1852 }
Paul Brooka68fe892010-03-01 00:08:59 +00001853 return 0;
bellard13eb76e2004-01-24 15:23:36 +00001854}
bellard8df1cd02005-01-28 22:37:22 +00001855
bellard13eb76e2004-01-24 15:23:36 +00001856#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001857
Avi Kivitya8170e52012-10-23 12:30:10 +02001858static void invalidate_and_set_dirty(hwaddr addr,
1859 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001860{
1861 if (!cpu_physical_memory_is_dirty(addr)) {
1862 /* invalidate code */
1863 tb_invalidate_phys_page_range(addr, addr + length, 0);
1864 /* set dirty bit */
1865 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1866 }
Anthony PERARDe2269392012-10-03 13:49:22 +00001867 xen_modified_memory(addr, length);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001868}
1869
Avi Kivitya8170e52012-10-23 12:30:10 +02001870void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02001871 int len, bool is_write)
bellard13eb76e2004-01-24 15:23:36 +00001872{
Avi Kivityac1970f2012-10-03 16:22:53 +02001873 AddressSpaceDispatch *d = as->dispatch;
Avi Kivity37ec01d2012-03-08 18:08:35 +02001874 int l;
bellard13eb76e2004-01-24 15:23:36 +00001875 uint8_t *ptr;
1876 uint32_t val;
Avi Kivitya8170e52012-10-23 12:30:10 +02001877 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02001878 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00001879
bellard13eb76e2004-01-24 15:23:36 +00001880 while (len > 0) {
1881 page = addr & TARGET_PAGE_MASK;
1882 l = (page + TARGET_PAGE_SIZE) - addr;
1883 if (l > len)
1884 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02001885 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00001886
bellard13eb76e2004-01-24 15:23:36 +00001887 if (is_write) {
Avi Kivityf3705d52012-03-08 16:16:34 +02001888 if (!memory_region_is_ram(section->mr)) {
Avi Kivitya8170e52012-10-23 12:30:10 +02001889 hwaddr addr1;
Blue Swirlcc5bea62012-04-14 14:56:48 +00001890 addr1 = memory_region_section_addr(section, addr);
bellard6a00d602005-11-21 23:25:50 +00001891 /* XXX: could force cpu_single_env to NULL to avoid
1892 potential bugs */
aurel326c2934d2009-02-18 21:37:17 +00001893 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00001894 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001895 val = ldl_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02001896 io_mem_write(section->mr, addr1, val, 4);
bellard13eb76e2004-01-24 15:23:36 +00001897 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00001898 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00001899 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001900 val = lduw_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02001901 io_mem_write(section->mr, addr1, val, 2);
bellard13eb76e2004-01-24 15:23:36 +00001902 l = 2;
1903 } else {
bellard1c213d12005-09-03 10:49:04 +00001904 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001905 val = ldub_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02001906 io_mem_write(section->mr, addr1, val, 1);
bellard13eb76e2004-01-24 15:23:36 +00001907 l = 1;
1908 }
Avi Kivityf3705d52012-03-08 16:16:34 +02001909 } else if (!section->readonly) {
Anthony PERARD8ca56922011-07-15 04:32:53 +00001910 ram_addr_t addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02001911 addr1 = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00001912 + memory_region_section_addr(section, addr);
bellard13eb76e2004-01-24 15:23:36 +00001913 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00001914 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00001915 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001916 invalidate_and_set_dirty(addr1, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01001917 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00001918 }
1919 } else {
Blue Swirlcc5bea62012-04-14 14:56:48 +00001920 if (!(memory_region_is_ram(section->mr) ||
1921 memory_region_is_romd(section->mr))) {
Avi Kivitya8170e52012-10-23 12:30:10 +02001922 hwaddr addr1;
bellard13eb76e2004-01-24 15:23:36 +00001923 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00001924 addr1 = memory_region_section_addr(section, addr);
aurel326c2934d2009-02-18 21:37:17 +00001925 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00001926 /* 32 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02001927 val = io_mem_read(section->mr, addr1, 4);
bellardc27004e2005-01-03 23:35:10 +00001928 stl_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00001929 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00001930 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00001931 /* 16 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02001932 val = io_mem_read(section->mr, addr1, 2);
bellardc27004e2005-01-03 23:35:10 +00001933 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00001934 l = 2;
1935 } else {
bellard1c213d12005-09-03 10:49:04 +00001936 /* 8 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02001937 val = io_mem_read(section->mr, addr1, 1);
bellardc27004e2005-01-03 23:35:10 +00001938 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00001939 l = 1;
1940 }
1941 } else {
1942 /* RAM case */
Anthony PERARD0a1b3572012-03-19 15:54:34 +00001943 ptr = qemu_get_ram_ptr(section->mr->ram_addr
Blue Swirlcc5bea62012-04-14 14:56:48 +00001944 + memory_region_section_addr(section,
1945 addr));
Avi Kivityf3705d52012-03-08 16:16:34 +02001946 memcpy(buf, ptr, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01001947 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00001948 }
1949 }
1950 len -= l;
1951 buf += l;
1952 addr += l;
1953 }
1954}
bellard8df1cd02005-01-28 22:37:22 +00001955
Avi Kivitya8170e52012-10-23 12:30:10 +02001956void address_space_write(AddressSpace *as, hwaddr addr,
Avi Kivityac1970f2012-10-03 16:22:53 +02001957 const uint8_t *buf, int len)
1958{
1959 address_space_rw(as, addr, (uint8_t *)buf, len, true);
1960}
1961
1962/**
1963 * address_space_read: read from an address space.
1964 *
1965 * @as: #AddressSpace to be accessed
1966 * @addr: address within that address space
 1967 * @buf: buffer with the data transferred; @len gives its length in bytes
1968 */
Avi Kivitya8170e52012-10-23 12:30:10 +02001969void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02001970{
1971 address_space_rw(as, addr, buf, len, false);
1972}
1973
1974
Avi Kivitya8170e52012-10-23 12:30:10 +02001975void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02001976 int len, int is_write)
1977{
1978 return address_space_rw(&address_space_memory, addr, buf, len, is_write);
1979}
1980
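/* Editor's sketch (hedged, not part of the original file): the copy-style
 * API above in action.  `gpa` is a hypothetical guest-physical address.
 *
 *     uint8_t buf[4];
 *     address_space_read(&address_space_memory, gpa, buf, sizeof(buf));
 *     buf[0] |= 1;
 *     address_space_write(&address_space_memory, gpa, buf, sizeof(buf));
 *
 * cpu_physical_memory_rw(gpa, buf, sizeof(buf), 1) is the legacy wrapper
 * for the same write on the system memory address space.
 */
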
bellardd0ecd2a2006-04-23 17:14:48 +00001981/* used for ROM loading : can write in RAM and ROM */
Avi Kivitya8170e52012-10-23 12:30:10 +02001982void cpu_physical_memory_write_rom(hwaddr addr,
bellardd0ecd2a2006-04-23 17:14:48 +00001983 const uint8_t *buf, int len)
1984{
Avi Kivityac1970f2012-10-03 16:22:53 +02001985 AddressSpaceDispatch *d = address_space_memory.dispatch;
bellardd0ecd2a2006-04-23 17:14:48 +00001986 int l;
1987 uint8_t *ptr;
Avi Kivitya8170e52012-10-23 12:30:10 +02001988 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02001989 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00001990
bellardd0ecd2a2006-04-23 17:14:48 +00001991 while (len > 0) {
1992 page = addr & TARGET_PAGE_MASK;
1993 l = (page + TARGET_PAGE_SIZE) - addr;
1994 if (l > len)
1995 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02001996 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00001997
Blue Swirlcc5bea62012-04-14 14:56:48 +00001998 if (!(memory_region_is_ram(section->mr) ||
1999 memory_region_is_romd(section->mr))) {
bellardd0ecd2a2006-04-23 17:14:48 +00002000 /* do nothing */
2001 } else {
2002 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02002003 addr1 = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002004 + memory_region_section_addr(section, addr);
bellardd0ecd2a2006-04-23 17:14:48 +00002005 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002006 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002007 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002008 invalidate_and_set_dirty(addr1, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002009 qemu_put_ram_ptr(ptr);
bellardd0ecd2a2006-04-23 17:14:48 +00002010 }
2011 len -= l;
2012 buf += l;
2013 addr += l;
2014 }
2015}
2016
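/* Editor's sketch (illustrative assumption): firmware loaders use the
 * function above because an ordinary cpu_physical_memory_write() silently
 * skips ROM/read-only regions (see the write path in address_space_rw()).
 * The names `bios_base`, `bios_blob` and `bios_size` are hypothetical.
 *
 *     cpu_physical_memory_write_rom(bios_base, bios_blob, bios_size);
 */
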
aliguori6d16c2f2009-01-22 16:59:11 +00002017typedef struct {
2018 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002019 hwaddr addr;
2020 hwaddr len;
aliguori6d16c2f2009-01-22 16:59:11 +00002021} BounceBuffer;
2022
2023static BounceBuffer bounce;
2024
aliguoriba223c22009-01-22 16:59:16 +00002025typedef struct MapClient {
2026 void *opaque;
2027 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00002028 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002029} MapClient;
2030
Blue Swirl72cf2d42009-09-12 07:36:22 +00002031static QLIST_HEAD(map_client_list, MapClient) map_client_list
2032 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002033
2034void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2035{
Anthony Liguori7267c092011-08-20 22:09:37 -05002036 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00002037
2038 client->opaque = opaque;
2039 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002040 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00002041 return client;
2042}
2043
Blue Swirl8b9c99d2012-10-28 11:04:51 +00002044static void cpu_unregister_map_client(void *_client)
aliguoriba223c22009-01-22 16:59:16 +00002045{
2046 MapClient *client = (MapClient *)_client;
2047
Blue Swirl72cf2d42009-09-12 07:36:22 +00002048 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002049 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002050}
2051
2052static void cpu_notify_map_clients(void)
2053{
2054 MapClient *client;
2055
Blue Swirl72cf2d42009-09-12 07:36:22 +00002056 while (!QLIST_EMPTY(&map_client_list)) {
2057 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002058 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09002059 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00002060 }
2061}
2062
aliguori6d16c2f2009-01-22 16:59:11 +00002063/* Map a physical memory region into a host virtual address.
2064 * May map a subset of the requested range, given by and returned in *plen.
2065 * May return NULL if resources needed to perform the mapping are exhausted.
2066 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002067 * Use cpu_register_map_client() to know when retrying the map operation is
2068 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002069 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002070void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002071 hwaddr addr,
2072 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002073 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002074{
Avi Kivityac1970f2012-10-03 16:22:53 +02002075 AddressSpaceDispatch *d = as->dispatch;
Avi Kivitya8170e52012-10-23 12:30:10 +02002076 hwaddr len = *plen;
2077 hwaddr todo = 0;
aliguori6d16c2f2009-01-22 16:59:11 +00002078 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02002079 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02002080 MemoryRegionSection *section;
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002081 ram_addr_t raddr = RAM_ADDR_MAX;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002082 ram_addr_t rlen;
2083 void *ret;
aliguori6d16c2f2009-01-22 16:59:11 +00002084
2085 while (len > 0) {
2086 page = addr & TARGET_PAGE_MASK;
2087 l = (page + TARGET_PAGE_SIZE) - addr;
2088 if (l > len)
2089 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02002090 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
aliguori6d16c2f2009-01-22 16:59:11 +00002091
Avi Kivityf3705d52012-03-08 16:16:34 +02002092 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002093 if (todo || bounce.buffer) {
aliguori6d16c2f2009-01-22 16:59:11 +00002094 break;
2095 }
2096 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2097 bounce.addr = addr;
2098 bounce.len = l;
2099 if (!is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002100 address_space_read(as, addr, bounce.buffer, l);
aliguori6d16c2f2009-01-22 16:59:11 +00002101 }
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002102
2103 *plen = l;
2104 return bounce.buffer;
aliguori6d16c2f2009-01-22 16:59:11 +00002105 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002106 if (!todo) {
Avi Kivityf3705d52012-03-08 16:16:34 +02002107 raddr = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002108 + memory_region_section_addr(section, addr);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002109 }
aliguori6d16c2f2009-01-22 16:59:11 +00002110
2111 len -= l;
2112 addr += l;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002113 todo += l;
aliguori6d16c2f2009-01-22 16:59:11 +00002114 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002115 rlen = todo;
2116 ret = qemu_ram_ptr_length(raddr, &rlen);
2117 *plen = rlen;
2118 return ret;
aliguori6d16c2f2009-01-22 16:59:11 +00002119}
2120
Avi Kivityac1970f2012-10-03 16:22:53 +02002121/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002122 * Will also mark the memory as dirty if is_write == 1. access_len gives
2123 * the amount of memory that was actually read or written by the caller.
2124 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002125void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2126 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002127{
2128 if (buffer != bounce.buffer) {
2129 if (is_write) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03002130 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002131 while (access_len) {
2132 unsigned l;
2133 l = TARGET_PAGE_SIZE;
2134 if (l > access_len)
2135 l = access_len;
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002136 invalidate_and_set_dirty(addr1, l);
aliguori6d16c2f2009-01-22 16:59:11 +00002137 addr1 += l;
2138 access_len -= l;
2139 }
2140 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002141 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002142 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002143 }
aliguori6d16c2f2009-01-22 16:59:11 +00002144 return;
2145 }
2146 if (is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002147 address_space_write(as, bounce.addr, bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002148 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002149 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002150 bounce.buffer = NULL;
aliguoriba223c22009-01-22 16:59:16 +00002151 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002152}
bellardd0ecd2a2006-04-23 17:14:48 +00002153
Avi Kivitya8170e52012-10-23 12:30:10 +02002154void *cpu_physical_memory_map(hwaddr addr,
2155 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002156 int is_write)
2157{
2158 return address_space_map(&address_space_memory, addr, plen, is_write);
2159}
2160
Avi Kivitya8170e52012-10-23 12:30:10 +02002161void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2162 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002163{
2164 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2165}
2166
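/* Editor's sketch (hedged, not original code): a zero-copy DMA write using
 * the map/unmap API above.  `as`, `gpa`, `src` and `len` are assumed to be
 * provided by the caller; a real user loops because *plen may come back
 * smaller than requested.
 *
 *     hwaddr plen = len;
 *     void *p = address_space_map(as, gpa, &plen, true);
 *     if (p) {
 *         memcpy(p, src, plen);
 *         address_space_unmap(as, p, plen, true, plen);
 *     }
 *
 * When NULL is returned the mapping resources (e.g. the bounce buffer) are
 * busy; register a callback with cpu_register_map_client() and retry once
 * it fires.
 */
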
bellard8df1cd02005-01-28 22:37:22 +00002167/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002168static inline uint32_t ldl_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002169 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002170{
bellard8df1cd02005-01-28 22:37:22 +00002171 uint8_t *ptr;
2172 uint32_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02002173 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00002174
Avi Kivityac1970f2012-10-03 16:22:53 +02002175 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002176
Blue Swirlcc5bea62012-04-14 14:56:48 +00002177 if (!(memory_region_is_ram(section->mr) ||
2178 memory_region_is_romd(section->mr))) {
bellard8df1cd02005-01-28 22:37:22 +00002179 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00002180 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02002181 val = io_mem_read(section->mr, addr, 4);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002182#if defined(TARGET_WORDS_BIGENDIAN)
2183 if (endian == DEVICE_LITTLE_ENDIAN) {
2184 val = bswap32(val);
2185 }
2186#else
2187 if (endian == DEVICE_BIG_ENDIAN) {
2188 val = bswap32(val);
2189 }
2190#endif
bellard8df1cd02005-01-28 22:37:22 +00002191 } else {
2192 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02002193 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002194 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002195 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002196 switch (endian) {
2197 case DEVICE_LITTLE_ENDIAN:
2198 val = ldl_le_p(ptr);
2199 break;
2200 case DEVICE_BIG_ENDIAN:
2201 val = ldl_be_p(ptr);
2202 break;
2203 default:
2204 val = ldl_p(ptr);
2205 break;
2206 }
bellard8df1cd02005-01-28 22:37:22 +00002207 }
2208 return val;
2209}
2210
Avi Kivitya8170e52012-10-23 12:30:10 +02002211uint32_t ldl_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002212{
2213 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2214}
2215
Avi Kivitya8170e52012-10-23 12:30:10 +02002216uint32_t ldl_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002217{
2218 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2219}
2220
Avi Kivitya8170e52012-10-23 12:30:10 +02002221uint32_t ldl_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002222{
2223 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2224}
2225
bellard84b7b8e2005-11-28 21:19:04 +00002226/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002227static inline uint64_t ldq_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002228 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00002229{
bellard84b7b8e2005-11-28 21:19:04 +00002230 uint8_t *ptr;
2231 uint64_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02002232 MemoryRegionSection *section;
bellard84b7b8e2005-11-28 21:19:04 +00002233
Avi Kivityac1970f2012-10-03 16:22:53 +02002234 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002235
Blue Swirlcc5bea62012-04-14 14:56:48 +00002236 if (!(memory_region_is_ram(section->mr) ||
2237 memory_region_is_romd(section->mr))) {
bellard84b7b8e2005-11-28 21:19:04 +00002238 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00002239 addr = memory_region_section_addr(section, addr);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002240
2241 /* XXX This is broken when device endian != cpu endian.
2242 Fix and add "endian" variable check */
bellard84b7b8e2005-11-28 21:19:04 +00002243#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivity37ec01d2012-03-08 18:08:35 +02002244 val = io_mem_read(section->mr, addr, 4) << 32;
2245 val |= io_mem_read(section->mr, addr + 4, 4);
bellard84b7b8e2005-11-28 21:19:04 +00002246#else
Avi Kivity37ec01d2012-03-08 18:08:35 +02002247 val = io_mem_read(section->mr, addr, 4);
2248 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
bellard84b7b8e2005-11-28 21:19:04 +00002249#endif
2250 } else {
2251 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02002252 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002253 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002254 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002255 switch (endian) {
2256 case DEVICE_LITTLE_ENDIAN:
2257 val = ldq_le_p(ptr);
2258 break;
2259 case DEVICE_BIG_ENDIAN:
2260 val = ldq_be_p(ptr);
2261 break;
2262 default:
2263 val = ldq_p(ptr);
2264 break;
2265 }
bellard84b7b8e2005-11-28 21:19:04 +00002266 }
2267 return val;
2268}
2269
Avi Kivitya8170e52012-10-23 12:30:10 +02002270uint64_t ldq_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002271{
2272 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2273}
2274
Avi Kivitya8170e52012-10-23 12:30:10 +02002275uint64_t ldq_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002276{
2277 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2278}
2279
Avi Kivitya8170e52012-10-23 12:30:10 +02002280uint64_t ldq_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002281{
2282 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2283}
2284
bellardaab33092005-10-30 20:48:42 +00002285/* XXX: optimize */
Avi Kivitya8170e52012-10-23 12:30:10 +02002286uint32_t ldub_phys(hwaddr addr)
bellardaab33092005-10-30 20:48:42 +00002287{
2288 uint8_t val;
2289 cpu_physical_memory_read(addr, &val, 1);
2290 return val;
2291}
2292
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002293/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002294static inline uint32_t lduw_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002295 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00002296{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002297 uint8_t *ptr;
2298 uint64_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02002299 MemoryRegionSection *section;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002300
Avi Kivityac1970f2012-10-03 16:22:53 +02002301 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002302
Blue Swirlcc5bea62012-04-14 14:56:48 +00002303 if (!(memory_region_is_ram(section->mr) ||
2304 memory_region_is_romd(section->mr))) {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002305 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00002306 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02002307 val = io_mem_read(section->mr, addr, 2);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002308#if defined(TARGET_WORDS_BIGENDIAN)
2309 if (endian == DEVICE_LITTLE_ENDIAN) {
2310 val = bswap16(val);
2311 }
2312#else
2313 if (endian == DEVICE_BIG_ENDIAN) {
2314 val = bswap16(val);
2315 }
2316#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002317 } else {
2318 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02002319 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002320 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002321 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002322 switch (endian) {
2323 case DEVICE_LITTLE_ENDIAN:
2324 val = lduw_le_p(ptr);
2325 break;
2326 case DEVICE_BIG_ENDIAN:
2327 val = lduw_be_p(ptr);
2328 break;
2329 default:
2330 val = lduw_p(ptr);
2331 break;
2332 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002333 }
2334 return val;
bellardaab33092005-10-30 20:48:42 +00002335}
2336
Avi Kivitya8170e52012-10-23 12:30:10 +02002337uint32_t lduw_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002338{
2339 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2340}
2341
Avi Kivitya8170e52012-10-23 12:30:10 +02002342uint32_t lduw_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002343{
2344 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2345}
2346
Avi Kivitya8170e52012-10-23 12:30:10 +02002347uint32_t lduw_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002348{
2349 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2350}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
                               & TARGET_PAGE_MASK)
                              + memory_region_section_addr(section, addr);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
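
/*
 * Illustrative usage (editor's sketch, not part of the original file):
 * the notdirty store is meant for softmmu helpers that update guest page
 * table entries, where flagging the RAM page dirty or invalidating translated
 * code would be wasteful.  "pte_addr" and the bit mask are made up.
 *
 *     uint32_t pte = ldl_phys(pte_addr);
 *     stl_phys_notdirty(pte_addr, pte | 0x20);
 */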

void stq_phys_notdirty(hwaddr addr, uint64_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(section->mr, addr, val >> 32, 4);
        io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
#else
        io_mem_write(section->mr, addr, (uint32_t)val, 4);
        io_mem_write(section->mr, addr + 4, val >> 32, 4);
#endif
    } else {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        stq_p(ptr, val);
    }
}
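
/*
 * Illustrative usage (editor's sketch, not part of the original file):
 * the 64-bit variant serves targets with 64-bit page table entries;
 * "pte_addr" is a made-up guest-physical address.  As the code above shows,
 * an I/O-region store is issued as two 4-byte io_mem_write() calls, ordered
 * according to TARGET_WORDS_BIGENDIAN.
 *
 *     uint64_t pte = ldq_phys(pte_addr);
 *     stq_phys_notdirty(pte_addr, pte | (1ULL << 6));
 */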

/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
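
/*
 * Illustrative usage (editor's sketch, not part of the original file):
 * writing a 32-bit register that the guest expects in little-endian layout,
 * independent of the target's native byte order; the address and value are
 * made up.
 *
 *     stl_le_phys(0x10002000, 0x1);
 *
 * On a big-endian target the value is byte-swapped before the store; on a
 * little-endian target it is stored unchanged.
 */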

/* XXX: optimize */
void stb_phys(hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
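
/*
 * Illustrative usage (editor's sketch, not part of the original file):
 * the 64-bit stores swap in place and then go through
 * cpu_physical_memory_write(), so publishing a descriptor address to a
 * little-endian device could look like this (address and value made up):
 *
 *     stq_le_phys(0x10003000, 0x123456789abcULL);
 *
 * tswap64() swaps between host and target order, while cpu_to_le64() and
 * cpu_to_be64() convert from host order to a fixed endianness.
 */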

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
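
/*
 * Illustrative usage (editor's sketch, not part of the original file):
 * this is the path the gdbstub takes to read or patch guest virtual memory.
 * It translates one page at a time with cpu_get_phys_page_debug() and writes
 * through cpu_physical_memory_write_rom(), so breakpoints can be planted even
 * in read-only regions.  "env" and "pc" stand for the current CPU state and a
 * guest virtual address.
 *
 *     uint8_t insn[4];
 *     if (cpu_memory_rw_debug(env, pc, insn, sizeof(insn), 0) < 0) {
 *         return -1;
 *     }
 */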
#endif

#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
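
/*
 * Illustrative usage (editor's sketch, not part of the original file):
 * a virtio caller could branch on the target byte order like this, with
 * "val" standing in for a guest-visible 16-bit field:
 *
 *     if (virtio_is_big_endian()) {
 *         val = bswap16(val);
 *     }
 */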

#endif

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             phys_addr >> TARGET_PAGE_BITS);

    return !(memory_region_is_ram(section->mr) ||
             memory_region_is_romd(section->mr));
}
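
/*
 * Illustrative usage (editor's sketch, not part of the original file):
 * memory-dump code can use this predicate to skip MMIO-backed pages and copy
 * only RAM or ROM-device contents; "paddr" and "buf" are placeholders.
 *
 *     if (!cpu_physical_memory_is_io(paddr)) {
 *         cpu_physical_memory_read(paddr, buf, TARGET_PAGE_SIZE);
 *     }
 */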
#endif