/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "hw/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_UNASSIGNED
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;
DMAContext dma_context_memory;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *, cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}
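
/* Illustrative usage sketch (not part of the original source): callers
 * such as the memory listener below populate the radix tree with
 * phys_page_set() and the TLB code resolves pages again with
 * phys_page_find().  Assuming a dispatch 'd' and a section index 'sec'
 * obtained from phys_section_add():
 *
 *     phys_page_set(d, addr >> TARGET_PAGE_BITS, 1, sec);
 *     MemoryRegionSection *s = phys_page_find(d, addr >> TARGET_PAGE_BITS);
 *
 * A lookup that hits no leaf falls back to
 * &phys_sections[phys_section_unassigned].
 */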

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUArchState *qemu_get_cpu(int cpu)
{
    CPUArchState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu) {
            break;
        }
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
                            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
    }

    tlb_flush_page(env, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}
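
/* Usage sketch (illustrative, mirroring what the gdbstub does; not part
 * of the original file): insert a 4-byte GDB write watchpoint and later
 * remove it with the same addr/len/flags triple.
 *
 *     CPUWatchpoint *wp;
 *     if (cpu_watchpoint_insert(env, addr, 4,
 *                               BP_GDB | BP_MEM_WRITE, &wp) == 0) {
 *         ...
 *         cpu_watchpoint_remove(env, addr, 4, BP_GDB | BP_MEM_WRITE);
 *     }
 */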

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(env, wp);
        }
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
    }

    breakpoint_invalidate(env, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
#else
    return -ENOSYS;
#endif
}
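
/* Usage sketch (illustrative, not part of the original file): a
 * GDB-injected breakpoint at 'pc'; removal takes the same pc/flags pair.
 *
 *     CPUBreakpoint *bp;
 *     if (cpu_breakpoint_insert(env, pc, BP_GDB, &bp) == 0) {
 *         ...
 *         cpu_breakpoint_remove(env, pc, BP_GDB);
 *     }
 */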

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(env, bp);
        }
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(env, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

void cpu_reset_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUArchState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;

        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       The list heads copied by the memcpy above still point into the
       source CPU's lists, so reset the copy's lists before inserting.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&new_env->breakpoints);
    QTAILQ_INIT(&new_env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0) {
        return;
    }
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, paddr);
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = section - phys_sections;
        iotlb += memory_region_section_addr(section, paddr);
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* !defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;
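
/* sub_section[] holds one phys_sections index per byte offset within the
 * page, so SUBPAGE_IDX(addr) selects the section responsible for a given
 * offset; subpage_register() below fills a [start, end] byte range with a
 * single section index. */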

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(hwaddr base);

static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(AddressSpaceDispatch *d)
{
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}

static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    hwaddr addr;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    addr = start_addr;
    phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
    MemoryRegionSection now = *section, remain = *section;

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(d, &now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    while (remain.size >= TARGET_PAGE_SIZE) {
        now = remain;
        if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = TARGET_PAGE_SIZE;
            register_subpage(d, &now);
        } else {
            now.size &= TARGET_PAGE_MASK;
            register_multipage(d, &now);
        }
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(d, &now);
    }
}
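
/* Worked example (illustrative numbers, not from the original source):
 * with 4K target pages, and offset_within_region congruent to
 * offset_within_address_space modulo the page size, a section covering
 * [0x1800, 0x5800) is split into a subpage head [0x1800, 0x2000), whole
 * pages [0x2000, 0x5000) via register_multipage(), and a subpage tail
 * [0x5000, 0x5800).
 */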

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}
812
Marcelo Tosattic9027602010-03-01 20:25:08 -0300813#if defined(__linux__) && !defined(TARGET_S390X)
814
815#include <sys/vfs.h>
816
817#define HUGETLBFS_MAGIC 0x958458f6
818
819static long gethugepagesize(const char *path)
820{
821 struct statfs fs;
822 int ret;
823
824 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +0900825 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -0300826 } while (ret != 0 && errno == EINTR);
827
828 if (ret != 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +0900829 perror(path);
830 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -0300831 }
832
833 if (fs.f_type != HUGETLBFS_MAGIC)
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +0900834 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
Marcelo Tosattic9027602010-03-01 20:25:08 -0300835
836 return fs.f_bsize;
837}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory)) {
        perror("ftruncate");
    }

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif

static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QTAILQ_EMPTY(&ram_list.blocks)) {
        return 0;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
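
/* Illustrative example (numbers invented for the sketch): with blocks at
 * [0, 0x1000000) and [0x3000000, 0x4000000), a request for 0x800000 bytes
 * returns 0x1000000 -- the end of the block whose gap to the next block
 * is the smallest one that still fits, i.e. a best-fit search.
 */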

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        last = MAX(last, block->offset + block->length);
    }

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_vmalloc(size);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);

    if (kvm_enabled()) {
        kvm_setup_guest_memory(new_block->host, size);
    }

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}
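
/* Usage sketch (illustrative, not part of the original file): RAM-backed
 * regions are normally allocated through memory_region_init_ram(), which
 * calls qemu_ram_alloc() internally, but a direct use looks like:
 *
 *     ram_addr_t offset = qemu_ram_alloc(64 * 1024 * 1024, mr);
 *     void *host = qemu_get_ram_ptr(offset);
 */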

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC | PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */
1218
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return block->host + (addr - block->offset);
}
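
/* Illustrative use of qemu_get_ram_ptr() (sketch, not from the
 * original source):
 *
 *     uint8_t *p = qemu_get_ram_ptr(addr1);
 *     memcpy(p, buf, l);
 *
 * which is exactly the RAM fast path taken by address_space_rw()
 * below.  The mru_block cache makes the common case -- repeated hits
 * on the same block -- a single comparison instead of a list walk. */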

/* Return a host pointer to ram allocated with qemu_ram_alloc.  Same as
 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
 *
 * ??? Is this still necessary?
 */
static void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}
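
/* Unlike qemu_get_ram_ptr(), the length-aware variant above clamps
 * *size so the returned pointer can never be dereferenced past the
 * end of the containing RAMBlock; callers must re-read *size after
 * the call. */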

void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This case appears when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}

static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
#endif
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
#endif
}

static const MemoryRegionOps unassigned_mem_ops = {
    .read = unassigned_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t error_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    abort();
}

static void error_mem_write(void *opaque, hwaddr addr,
                            uint64_t value, unsigned size)
{
    abort();
}

static const MemoryRegionOps error_mem_ops = {
    .read = error_mem_read,
    .write = error_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps rom_mem_ops = {
    .read = error_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
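
/* The notdirty handler is what makes dirty tracking cheap: pages with
 * CODE_DIRTY_FLAG clear are routed through this MMIO-style path, so a
 * write first invalidates any TBs derived from the page, then performs
 * the store on the underlying RAM and sets the dirty flags.  Once all
 * flags are set (0xff) the TLB entry is flipped back to a plain RAM
 * mapping and later writes go straight to memory. */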

static const MemoryRegionOps notdirty_mem_ops = {
    .read = error_mem_read,
    .write = notdirty_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = cpu_single_env;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb_check_watchpoint(env);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
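
/* On a hit the code above either stops before the access (raising
 * EXCP_DEBUG and longjmp'ing out via cpu_loop_exit) or regenerates the
 * current TB with a one-instruction limit so execution stops right
 * after the access completes.  env->watchpoint_hit doubles as a guard
 * against re-entering the check while the replacement TB runs. */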

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_read(void *opaque, hwaddr addr,
                             unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    return io_mem_read(section->mr, addr, len);
}

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " idx %d value %"PRIx64"\n",
           __func__, mmio, len, addr, idx, value);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    io_mem_write(section->mr, addr, value, len);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
                                 unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return ldub_p(ptr);
    case 2: return lduw_p(ptr);
    case 4: return ldl_p(ptr);
    default: abort();
    }
}

static void subpage_ram_write(void *opaque, hwaddr addr,
                              uint64_t value, unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return stb_p(ptr, value);
    case 2: return stw_p(ptr, value);
    case 4: return stl_p(ptr, value);
    default: abort();
    }
}

static const MemoryRegionOps subpage_ram_ops = {
    .read = subpage_ram_read,
    .write = subpage_ram_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    if (memory_region_is_ram(phys_sections[section].mr)) {
        MemoryRegionSection new_section = phys_sections[section];
        new_section.mr = &io_mem_subpage_ram;
        section = phys_section_add(&new_section);
    }
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}
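
/* A subpage stands in for a single target page whose contents are not
 * homogeneous: sub_section[] maps each SUBPAGE_IDX offset within the
 * page to the MemoryRegionSection that handles it.  RAM sections are
 * rerouted through io_mem_subpage_ram above because the generic RAM
 * fast path cannot be used for a page that is only partially RAM. */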

static subpage_t *subpage_init(hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->base = base;
    memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);

    return mmio;
}

static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = UINT64_MAX,
    };

    return phys_section_add(&section);
}

MemoryRegion *iotlb_to_region(hwaddr index)
{
    return phys_sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
    memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
                          "subpage-ram", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}

static void mem_begin(MemoryListener *listener)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);

    destroy_all_mappings(d);
    d->phys_map.ptr = PHYS_MAP_NODE_NIL;
}

static void core_begin(MemoryListener *listener)
{
    phys_sections_clear();
    phys_section_unassigned = dummy_section(&io_mem_unassigned);
    phys_section_notdirty = dummy_section(&io_mem_notdirty);
    phys_section_rom = dummy_section(&io_mem_rom);
    phys_section_watch = dummy_section(&io_mem_watch);
}

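/* core_begin runs at the start of every topology update: phys_sections
 * is rebuilt from scratch, and the four fixed sections are re-added
 * first so that (presumably by construction rather than by contract)
 * phys_section_unassigned, _notdirty, _rom and _watch keep stable,
 * well-known indices across rebuilds. */
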
static void tcg_commit(MemoryListener *listener)
{
    CPUArchState *env;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}

static void io_region_add(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);

    mrio->mr = section->mr;
    mrio->offset = section->offset_within_region;
    iorange_init(&mrio->iorange, &memory_region_iorange_ops,
                 section->offset_within_address_space, section->size);
    ioport_register(&mrio->iorange);
}

static void io_region_del(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    isa_unassign_ioport(section->offset_within_address_space, section->size);
}

static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .priority = 1,
};

static MemoryListener io_memory_listener = {
    .region_add = io_region_add,
    .region_del = io_region_del,
    .priority = 0,
};

static MemoryListener tcg_memory_listener = {
    .commit = tcg_commit,
};

void address_space_init_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
    d->listener = (MemoryListener) {
        .begin = mem_begin,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    as->dispatch = d;
    memory_listener_register(&d->listener, as);
}
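
/* Every AddressSpace gets a private dispatch structure and listener:
 * mem_begin empties the radix tree, and region_add/region_nop (both
 * mem_add) repopulate it, so each topology change rebuilds the
 * page-to-section mapping for that address space from scratch. */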

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    memory_listener_unregister(&d->listener);
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    g_free(d);
    as->dispatch = NULL;
}

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    address_space_init(&address_space_memory, system_memory);
    address_space_memory.name = "memory";

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    address_space_init(&address_space_io, system_io);
    address_space_io.name = "I/O";

    memory_listener_register(&core_memory_listener, &address_space_memory);
    memory_listener_register(&io_memory_listener, &address_space_io);
    memory_listener_register(&tcg_memory_listener, &address_space_memory);

    dma_context_init(&dma_context_memory, &address_space_memory,
                     NULL, NULL, NULL);
}
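
/* Three listeners cooperate on the system address spaces: the core
 * listener rebuilds the phys_sections table, the io listener mirrors
 * the I/O address space into the legacy ioport tables, and the tcg
 * listener's commit hook flushes every CPU TLB once a topology change
 * is complete. */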

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else

static void invalidate_and_set_dirty(hwaddr addr,
                                     hwaddr length)
{
    if (!cpu_physical_memory_is_dirty(addr)) {
        /* invalidate code */
        tb_invalidate_phys_page_range(addr, addr + length, 0);
        /* set dirty bit */
        cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
    }
    xen_modified_memory(addr, length);
}

void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
                      int len, bool is_write)
{
    AddressSpaceDispatch *d = as->dispatch;
    int l;
    uint8_t *ptr;
    uint32_t val;
    hwaddr page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(d, page >> TARGET_PAGE_BITS);

        if (is_write) {
            if (!memory_region_is_ram(section->mr)) {
                hwaddr addr1;
                addr1 = memory_region_section_addr(section, addr);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write(section->mr, addr1, val, 4);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write(section->mr, addr1, val, 2);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write(section->mr, addr1, val, 1);
                    l = 1;
                }
            } else if (!section->readonly) {
                ram_addr_t addr1;
                addr1 = memory_region_get_ram_addr(section->mr)
                    + memory_region_section_addr(section, addr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if (!(memory_region_is_ram(section->mr) ||
                  memory_region_is_romd(section->mr))) {
                hwaddr addr1;
                /* I/O case */
                addr1 = memory_region_section_addr(section, addr);
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read(section->mr, addr1, 4);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read(section->mr, addr1, 2);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read(section->mr, addr1, 1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(section->mr->ram_addr
                                       + memory_region_section_addr(section,
                                                                    addr));
                memcpy(buf, ptr, l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
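
/* Illustrative call (sketch; "gpa" is a hypothetical guest physical
 * address, the APIs are the ones defined in this file):
 *
 *     uint8_t buf[4];
 *     address_space_read(&address_space_memory, gpa, buf, sizeof(buf));
 *
 * MMIO is accessed through io_mem_read/io_mem_write in 1/2/4-byte
 * chunks chosen by alignment and remaining length, while RAM is
 * memcpy'd directly through qemu_get_ram_ptr(). */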

void address_space_write(AddressSpace *as, hwaddr addr,
                         const uint8_t *buf, int len)
{
    address_space_rw(as, addr, (uint8_t *)buf, len, true);
}

/**
 * address_space_read: read from an address space.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @buf: buffer with the data transferred
 * @len: length of the data to be transferred
 */
void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
{
    address_space_rw(as, addr, buf, len, false);
}

void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, buf, len, is_write);
}

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(hwaddr addr,
                                   const uint8_t *buf, int len)
{
    AddressSpaceDispatch *d = address_space_memory.dispatch;
    int l;
    uint8_t *ptr;
    hwaddr page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(d, page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) ||
              memory_region_is_romd(section->mr))) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = memory_region_get_ram_addr(section->mr)
                + memory_region_section_addr(section, addr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(addr1, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

typedef struct {
    void *buffer;
    hwaddr addr;
    hwaddr len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

static void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
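
/* Bounce buffer protocol: only one bounce mapping may exist at a time
 * (address_space_map() checks bounce.buffer), so a caller that fails
 * to map registers a MapClient and is called back from
 * cpu_notify_map_clients() once address_space_unmap() releases the
 * buffer and a retry is likely to succeed. */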

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    AddressSpaceDispatch *d = as->dispatch;
    hwaddr len = *plen;
    hwaddr todo = 0;
    int l;
    hwaddr page;
    MemoryRegionSection *section;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(d, page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                address_space_read(as, addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = memory_region_get_ram_addr(section->mr)
                + memory_region_section_addr(section, addr);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}
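
/* Illustrative map/unmap pairing (sketch; "gpa", "size" and "retry_cb"
 * are hypothetical placeholders, the APIs are the ones in this file):
 *
 *     hwaddr plen = size;
 *     void *p = address_space_map(&address_space_memory, gpa, &plen, true);
 *     if (p) {
 *         ... write up to plen bytes at p, then:
 *         address_space_unmap(&address_space_memory, p, plen, true, plen);
 *     } else {
 *         cpu_register_map_client(opaque, retry_cb);
 *     }
 */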

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                invalidate_and_set_dirty(addr1, l);
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    address_space_unmap(&address_space_memory, buffer, len, is_write,
                        access_len);
}

/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint32_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}
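
/* For MMIO the device returns the value in its declared endianness, so
 * a bswap is inserted whenever that differs from the target's; for RAM
 * the typed ldl_le_p/ldl_be_p/ldl_p accessors convert directly on the
 * host pointer.  The ldq/lduw/st* variants below follow the same
 * pattern. */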

uint32_t ldl_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);

        /* XXX This is broken when device endian != cpu endian.
           Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = io_mem_read(section->mr, addr, 4) << 32;
        val |= io_mem_read(section->mr, addr + 4, 4);
#else
        val = io_mem_read(section->mr, addr, 4);
        val |= io_mem_read(section->mr, addr + 4, 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(hwaddr addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

bellard8df1cd02005-01-28 22:37:22 +00002350/* warning: addr must be aligned. The ram page is not masked as dirty
2351 and the code inside is not invalidated. It is useful if the dirty
2352 bits are used to track modified PTEs */
Avi Kivitya8170e52012-10-23 12:30:10 +02002353void stl_phys_notdirty(hwaddr addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00002354{
bellard8df1cd02005-01-28 22:37:22 +00002355 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02002356 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00002357
Avi Kivityac1970f2012-10-03 16:22:53 +02002358 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002359
Avi Kivityf3705d52012-03-08 16:16:34 +02002360 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00002361 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02002362 if (memory_region_is_ram(section->mr)) {
2363 section = &phys_sections[phys_section_rom];
2364 }
2365 io_mem_write(section->mr, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00002366 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02002367 unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002368 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002369 + memory_region_section_addr(section, addr);
pbrook5579c7f2009-04-11 14:47:08 +00002370 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00002371 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00002372
2373 if (unlikely(in_migration)) {
2374 if (!cpu_physical_memory_is_dirty(addr1)) {
2375 /* invalidate code */
2376 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2377 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002378 cpu_physical_memory_set_dirty_flags(
2379 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori74576192008-10-06 14:02:03 +00002380 }
2381 }
bellard8df1cd02005-01-28 22:37:22 +00002382 }
2383}
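
/*
 * Illustrative sketch, not upstream code: an emulated MMU setting an
 * accessed bit in a guest page table entry.  stl_phys_notdirty() keeps
 * the dirty bitmap and translated code intact, so bookkeeping writes by
 * the MMU itself are not mistaken for guest stores.
 * EXAMPLE_PTE_ACCESSED is a made-up flag value.
 */
#define EXAMPLE_PTE_ACCESSED 0x20
static inline void example_set_pte_accessed(hwaddr pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    stl_phys_notdirty(pte_addr, pte | EXAMPLE_PTE_ACCESSED);
}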

void stq_phys_notdirty(hwaddr addr, uint64_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(section->mr, addr, val >> 32, 4);
        io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
#else
        io_mem_write(section->mr, addr, (uint32_t)val, 4);
        io_mem_write(section->mr, addr + 4, val >> 32, 4);
#endif
    } else {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
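
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * device model completing a command by writing a status word that its
 * guest driver expects in big-endian format, whatever the host is.
 * EXAMPLE_STATUS_ADDR and EXAMPLE_STATUS_DONE are made-up values.
 */
#define EXAMPLE_STATUS_ADDR 0x10000004
#define EXAMPLE_STATUS_DONE 0x1
static inline void example_signal_done(void)
{
    /* unlike the _notdirty variants, stl_be_phys() also marks the page
       dirty and invalidates any translated code in it */
    stl_be_phys(EXAMPLE_STATUS_ADDR, EXAMPLE_STATUS_DONE);
}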

/* XXX: optimize */
void stb_phys(hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
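
/*
 * Illustrative sketch, not upstream code: publishing the 64-bit guest
 * physical address of a DMA descriptor ring in little-endian layout,
 * as a PCI-style device model might.  EXAMPLE_RING_BASE_ADDR is a
 * made-up register location.
 */
#define EXAMPLE_RING_BASE_ADDR 0x20000000
static inline void example_publish_ring_base(uint64_t ring_pa)
{
    /* the cpu_to_le64() inside stq_le_phys() keeps the in-memory
       layout host-independent */
    stq_le_phys(EXAMPLE_RING_BASE_ADDR, ring_pa);
}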

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        } else {
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
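
/*
 * Illustrative sketch, not part of the original file: how a gdbstub-style
 * debugger could peek at a NUL-terminated guest string through the CPU's
 * current address translation.  example_peek_guest_string() and its
 * buffer handling are hypothetical.
 */
static inline int example_peek_guest_string(CPUArchState *env,
                                            target_ulong guest_va,
                                            char *buf, int max_len)
{
    /* read max_len - 1 bytes (is_write == 0), then force termination */
    if (cpu_memory_rw_debug(env, guest_va, (uint8_t *)buf,
                            max_len - 1, 0) < 0) {
        return -1;
    }
    buf[max_len - 1] = '\0';
    return 0;
}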
#endif

#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out
 * if it's running on a big-endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             phys_addr >> TARGET_PAGE_BITS);

    return !(memory_region_is_ram(section->mr) ||
             memory_region_is_romd(section->mr));
}
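
/*
 * Illustrative sketch, not upstream code: a guest-memory dumper that
 * skips MMIO pages, since reading device registers purely for a dump
 * could trigger side effects.  The dump_page callback is hypothetical.
 */
static inline void example_dump_ram_pages(hwaddr start, hwaddr end,
                                          void (*dump_page)(hwaddr))
{
    hwaddr addr;

    for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        if (!cpu_physical_memory_is_io(addr)) {
            dump_page(addr);
        }
    }
}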
#endif