/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "hw/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_UNASSIGNED
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;
DMAContext dma_context_memory;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *, cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}
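
/*
 * Illustrative sketch (not part of the build): the dispatch structure
 * above is a radix tree with P_L2_LEVELS levels, each node consuming
 * L2_BITS bits of the page number.  A lookup peels off L2_BITS bits per
 * level, roughly:
 *
 *   hwaddr index = paddr >> TARGET_PAGE_BITS;
 *   PhysPageEntry lp = d->phys_map;
 *   for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
 *       lp = phys_map_nodes[lp.ptr][(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
 *   }
 *   MemoryRegionSection *section = &phys_sections[lp.ptr];
 *
 * phys_page_set_level() may mark an interior entry as a leaf when a
 * request covers a whole aligned subtree, so a large RAM region costs
 * only a handful of nodes rather than one leaf per page.
 */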

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
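
/*
 * Hedged note: this descriptor is consumed by the vmstate_register()
 * call in cpu_exec_init() below.  On save, the "halted" and
 * "interrupt_request" fields are written in list order; on load,
 * cpu_common_post_load() runs afterwards to clear the retired
 * CPU_INTERRUPT_EXIT bit and flush the TLB.
 */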
#endif

CPUState *qemu_get_cpu(int index)
{
    CPUArchState *env = first_cpu;
    CPUState *cpu = NULL;

    while (env) {
        cpu = ENV_GET_CPU(env);
        if (cpu->cpu_index == index) {
            break;
        }
        env = env->next_cpu;
    }

    return cpu;
}

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
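
/*
 * Illustrative sketch of a hypothetical caller (not part of the build):
 * watch four bytes at `addr` for writes, roughly as the gdbstub does for
 * hardware watchpoints.  The length must be a power of two and `addr`
 * must be aligned to it, otherwise -EINVAL is returned:
 *
 *   CPUWatchpoint *wp;
 *   if (cpu_watchpoint_insert(env, addr, 4,
 *                             BP_GDB | BP_MEM_WRITE, &wp) < 0) {
 *       // report failure
 *   }
 *   ...
 *   cpu_watchpoint_remove_by_ref(env, wp);
 */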

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}
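
/*
 * Illustrative sketch (not part of the build): a debugger front end such
 * as the gdbstub pairs these calls roughly as follows; BP_GDB keeps its
 * breakpoints ahead of target-internal BP_CPU ones in the queue:
 *
 *   cpu_breakpoint_insert(env, pc, BP_GDB, NULL);
 *   ...
 *   cpu_breakpoint_remove(env, pc, BP_GDB);
 */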

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* Enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction. */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

void cpu_reset_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUArchState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining. */
    new_env->next_cpu = next_cpu;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}
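
/*
 * Hedged note: consumers such as VGA emulation and live migration poll
 * the per-page dirty flags and then re-arm them with the reset function
 * above.  A hypothetical sketch (the helpers are declared elsewhere, in
 * the memory-internal headers of this era):
 *
 *   if (cpu_physical_memory_get_dirty(addr, TARGET_PAGE_SIZE,
 *                                     VGA_DIRTY_FLAG)) {
 *       // redraw the page, then watch for further writes
 *       cpu_physical_memory_reset_dirty(addr, addr + TARGET_PAGE_SIZE,
 *                                       VGA_DIRTY_FLAG);
 *   }
 */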

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, paddr);
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = section - phys_sections;
        iotlb += memory_region_section_addr(section, paddr);
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
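
/*
 * Sketch of the returned encoding, as implied by the code above: for RAM
 * pages the value is the page's ram_addr with a special section index
 * (notdirty or rom) folded into the low bits; for MMIO it is the index
 * of the section within phys_sections plus the offset of the access
 * inside the region.  The softmmu TLB fill path later decodes this
 * value to route loads and stores.
 */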
#endif /* !defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(hwaddr base);

static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(AddressSpaceDispatch *d)
{
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}

static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    hwaddr addr;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    addr = start_addr;
    phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
    MemoryRegionSection now = *section, remain = *section;

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(d, &now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    while (remain.size >= TARGET_PAGE_SIZE) {
        now = remain;
        if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = TARGET_PAGE_SIZE;
            register_subpage(d, &now);
        } else {
            now.size &= TARGET_PAGE_MASK;
            register_multipage(d, &now);
        }
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(d, &now);
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    filename = g_strdup_printf("%s/qemu_back_mem.XXXXXX", path);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif
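
/*
 * Hedged usage note: the hugetlbfs path above is taken when the user
 * runs QEMU with "-mem-path /dev/hugepages" (optionally with
 * "-mem-prealloc").  gethugepagesize() only warns if the directory is
 * not on hugetlbfs, and the backing file is unlinked immediately after
 * creation so it vanishes with the process.
 */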

static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
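
/*
 * Worked example (hypothetical offsets): with blocks at [0x0, 0x100000)
 * and [0x300000, 0x400000), a request for 0x80000 bytes sees the gaps
 * [0x100000, 0x300000) and [0x400000, ...) and returns 0x100000: the
 * end of the block whose following gap is the smallest that still fits
 * (best-fit, which limits fragmentation of the ram_addr_t space).
 */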

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_vmalloc(size);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}
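
/*
 * Illustrative sketch (hypothetical caller): memory_region_init_ram() is
 * the usual entry point into this allocator.  A block can then be named
 * for migration and mapped into the host address space:
 *
 *   ram_addr_t offset = qemu_ram_alloc(size, mr);
 *   qemu_ram_set_idstr(offset, "pc.ram", NULL);   // name is illustrative
 *   void *host = qemu_get_ram_ptr(offset);
 */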

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC | PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
1256 }
1257 }
1258 return block->host + (addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00001259}
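
/* Illustrative sketch (not part of exec.c): per the contract above, a
 * device that owns a RAM block may use qemu_get_ram_ptr() for local
 * accesses it knows stay inside the block.  "vram_offset" is assumed to
 * be a ram_addr_t returned earlier by qemu_ram_alloc(). */
static uint8_t example_vram_peek(ram_addr_t vram_offset, ram_addr_t off)
{
    uint8_t *base = qemu_get_ram_ptr(vram_offset);

    /* the caller guarantees 'off' stays inside the block it allocated */
    return base[off];
}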
1260
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001261/* Return a host pointer to ram allocated with qemu_ram_alloc. Same as
1262 * qemu_get_ram_ptr, but does not touch ram_list.mru_block.
1263 *
1264 * ??? Is this still necessary?
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001265 */
Blue Swirl8b9c99d2012-10-28 11:04:51 +00001266static void *qemu_safe_ram_ptr(ram_addr_t addr)
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001267{
1268 RAMBlock *block;
1269
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001270 /* The list is protected by the iothread lock here. */
Paolo Bonzinia3161032012-11-14 15:54:48 +01001271 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001272 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02001273 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001274 /* We need to check if the requested address is in the RAM
1275 * because we don't want to map the entire memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001276 * In that case just map until the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01001277 */
1278 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001279 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01001280 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001281 block->host =
1282 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01001283 }
1284 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001285 return block->host + (addr - block->offset);
1286 }
1287 }
1288
1289 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1290 abort();
1291
1292 return NULL;
1293}
1294
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001295/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1296 * but takes a size argument */
Blue Swirl8b9c99d2012-10-28 11:04:51 +00001297static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001298{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001299 if (*size == 0) {
1300 return NULL;
1301 }
Jan Kiszka868bb332011-06-21 22:59:09 +02001302 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001303 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02001304 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001305 RAMBlock *block;
1306
Paolo Bonzinia3161032012-11-14 15:54:48 +01001307 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001308 if (addr - block->offset < block->length) {
1309 if (addr - block->offset + *size > block->length)
1310 *size = block->length - addr + block->offset;
1311 return block->host + (addr - block->offset);
1312 }
1313 }
1314
1315 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1316 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001317 }
1318}
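
/* Caller sketch (hypothetical helper, not a QEMU API): because
 * qemu_ram_ptr_length() may shrink *size at a RAM block boundary, a
 * robust caller loops until the requested range has been consumed. */
static void example_copy_from_ram(void *dst, ram_addr_t addr, ram_addr_t len)
{
    while (len > 0) {
        ram_addr_t chunk = len;
        void *p = qemu_ram_ptr_length(addr, &chunk); /* chunk may shrink */

        memcpy(dst, p, chunk);
        dst = (uint8_t *)dst + chunk;
        addr += chunk;
        len -= chunk;
    }
}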
1319
Anthony PERARD050a0dd2010-09-16 13:57:49 +01001320void qemu_put_ram_ptr(void *addr)
1321{
1322 trace_qemu_put_ram_ptr(addr);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01001323}
1324
Marcelo Tosattie8902612010-10-11 15:31:19 -03001325int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00001326{
pbrook94a6b542009-04-11 17:15:54 +00001327 RAMBlock *block;
1328 uint8_t *host = ptr;
1329
Jan Kiszka868bb332011-06-21 22:59:09 +02001330 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001331 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001332 return 0;
1333 }
1334
Paolo Bonzinia3161032012-11-14 15:54:48 +01001335 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001336 /* This case happens when the block is not mapped. */
1337 if (block->host == NULL) {
1338 continue;
1339 }
Alex Williamsonf471a172010-06-11 11:11:42 -06001340 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03001341 *ram_addr = block->offset + (host - block->host);
1342 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06001343 }
pbrook94a6b542009-04-11 17:15:54 +00001344 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001345
Marcelo Tosattie8902612010-10-11 15:31:19 -03001346 return -1;
1347}
Alex Williamsonf471a172010-06-11 11:11:42 -06001348
Marcelo Tosattie8902612010-10-11 15:31:19 -03001349/* Some of the softmmu routines need to translate from a host pointer
1350 (typically a TLB entry) back to a ram offset. */
1351ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
1352{
1353 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06001354
Marcelo Tosattie8902612010-10-11 15:31:19 -03001355 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
1356 fprintf(stderr, "Bad ram pointer %p\n", ptr);
1357 abort();
1358 }
1359 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00001360}
1361
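/* Illustrative round trip (sketch): a host pointer obtained from
 * qemu_get_ram_ptr() should translate back to the same ram_addr_t. */
static void example_ram_addr_round_trip(ram_addr_t addr)
{
    void *host = qemu_get_ram_ptr(addr);

    if (qemu_ram_addr_from_host_nofail(host) != addr) {
        abort(); /* would indicate broken RAMBlock bookkeeping */
    }
}
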
Avi Kivitya8170e52012-10-23 12:30:10 +02001362static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001363 unsigned size)
bellard33417e72003-08-10 21:47:01 +00001364{
pbrook67d3b952006-12-18 05:03:52 +00001365#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00001366 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00001367#endif
Richard Henderson5b450402011-04-18 16:13:12 -07001368#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001369 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00001370#endif
1371 return 0;
1372}
1373
Avi Kivitya8170e52012-10-23 12:30:10 +02001374static void unassigned_mem_write(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001375 uint64_t val, unsigned size)
blueswir1e18231a2008-10-06 18:46:28 +00001376{
1377#ifdef DEBUG_UNASSIGNED
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001378 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
blueswir1e18231a2008-10-06 18:46:28 +00001379#endif
Richard Henderson5b450402011-04-18 16:13:12 -07001380#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001381 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00001382#endif
1383}
1384
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001385static const MemoryRegionOps unassigned_mem_ops = {
1386 .read = unassigned_mem_read,
1387 .write = unassigned_mem_write,
1388 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00001389};
1390
Avi Kivitya8170e52012-10-23 12:30:10 +02001391static uint64_t error_mem_read(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001392 unsigned size)
1393{
1394 abort();
1395}
1396
Avi Kivitya8170e52012-10-23 12:30:10 +02001397static void error_mem_write(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001398 uint64_t value, unsigned size)
1399{
1400 abort();
1401}
1402
1403static const MemoryRegionOps error_mem_ops = {
1404 .read = error_mem_read,
1405 .write = error_mem_write,
1406 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00001407};
1408
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001409static const MemoryRegionOps rom_mem_ops = {
1410 .read = error_mem_read,
1411 .write = unassigned_mem_write,
1412 .endianness = DEVICE_NATIVE_ENDIAN,
1413};
1414
Avi Kivitya8170e52012-10-23 12:30:10 +02001415static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001416 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001417{
bellard3a7d9292005-08-21 09:26:42 +00001418 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001419 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00001420 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1421#if !defined(CONFIG_USER_ONLY)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001422 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001423 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00001424#endif
1425 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001426 switch (size) {
1427 case 1:
1428 stb_p(qemu_get_ram_ptr(ram_addr), val);
1429 break;
1430 case 2:
1431 stw_p(qemu_get_ram_ptr(ram_addr), val);
1432 break;
1433 case 4:
1434 stl_p(qemu_get_ram_ptr(ram_addr), val);
1435 break;
1436 default:
1437 abort();
1438 }
bellardf23db162005-08-21 19:12:28 +00001439 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001440 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00001441 /* we remove the notdirty callback only if the code has been
1442 flushed */
1443 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00001444 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00001445}
1446
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001447static const MemoryRegionOps notdirty_mem_ops = {
1448 .read = error_mem_read,
1449 .write = notdirty_mem_write,
1450 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00001451};
1452
pbrook0f459d12008-06-09 00:20:13 +00001453/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00001454static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00001455{
Andreas Färber9349b4f2012-03-14 01:38:32 +01001456 CPUArchState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00001457 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00001458 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00001459 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00001460 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00001461
aliguori06d55cc2008-11-18 20:24:06 +00001462 if (env->watchpoint_hit) {
1463 /* We re-entered the check after replacing the TB. Now raise
1464 * the debug interrupt so that it will trigger after the
1465 * current instruction. */
1466 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
1467 return;
1468 }
pbrook2e70f6e2008-06-29 01:03:05 +00001469 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00001470 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001471 if ((vaddr == (wp->vaddr & len_mask) ||
1472 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00001473 wp->flags |= BP_WATCHPOINT_HIT;
1474 if (!env->watchpoint_hit) {
1475 env->watchpoint_hit = wp;
Blue Swirl5a316522012-12-02 21:28:09 +00001476 tb_check_watchpoint(env);
aliguori6e140f22008-11-18 20:37:55 +00001477 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1478 env->exception_index = EXCP_DEBUG;
Max Filippov488d6572012-01-29 02:24:39 +04001479 cpu_loop_exit(env);
aliguori6e140f22008-11-18 20:37:55 +00001480 } else {
1481 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1482 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
Max Filippov488d6572012-01-29 02:24:39 +04001483 cpu_resume_from_signal(env, NULL);
aliguori6e140f22008-11-18 20:37:55 +00001484 }
aliguori06d55cc2008-11-18 20:24:06 +00001485 }
aliguori6e140f22008-11-18 20:37:55 +00001486 } else {
1487 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00001488 }
1489 }
1490}
1491
pbrook6658ffb2007-03-16 23:58:11 +00001492/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1493 so these check for a hit then pass through to the normal out-of-line
1494 phys routines. */
Avi Kivitya8170e52012-10-23 12:30:10 +02001495static uint64_t watch_mem_read(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001496 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001497{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001498 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1499 switch (size) {
1500 case 1: return ldub_phys(addr);
1501 case 2: return lduw_phys(addr);
1502 case 4: return ldl_phys(addr);
1503 default: abort();
1504 }
pbrook6658ffb2007-03-16 23:58:11 +00001505}
1506
Avi Kivitya8170e52012-10-23 12:30:10 +02001507static void watch_mem_write(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001508 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001509{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001510 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1511 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04001512 case 1:
1513 stb_phys(addr, val);
1514 break;
1515 case 2:
1516 stw_phys(addr, val);
1517 break;
1518 case 4:
1519 stl_phys(addr, val);
1520 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02001521 default: abort();
1522 }
pbrook6658ffb2007-03-16 23:58:11 +00001523}
1524
Avi Kivity1ec9b902012-01-02 12:47:48 +02001525static const MemoryRegionOps watch_mem_ops = {
1526 .read = watch_mem_read,
1527 .write = watch_mem_write,
1528 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00001529};
pbrook6658ffb2007-03-16 23:58:11 +00001530
Avi Kivitya8170e52012-10-23 12:30:10 +02001531static uint64_t subpage_read(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001532 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001533{
Avi Kivity70c68e42012-01-02 12:32:48 +02001534 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07001535 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02001536 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00001537#if defined(DEBUG_SUBPAGE)
1538 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
1539 mmio, len, addr, idx);
1540#endif
blueswir1db7b5422007-05-26 17:36:03 +00001541
Avi Kivity5312bd82012-02-12 18:32:55 +02001542 section = &phys_sections[mmio->sub_section[idx]];
1543 addr += mmio->base;
1544 addr -= section->offset_within_address_space;
1545 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02001546 return io_mem_read(section->mr, addr, len);
blueswir1db7b5422007-05-26 17:36:03 +00001547}
1548
Avi Kivitya8170e52012-10-23 12:30:10 +02001549static void subpage_write(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001550 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001551{
Avi Kivity70c68e42012-01-02 12:32:48 +02001552 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07001553 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02001554 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00001555#if defined(DEBUG_SUBPAGE)
Avi Kivity70c68e42012-01-02 12:32:48 +02001556 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
1557 " idx %d value %"PRIx64"\n",
Richard Hendersonf6405242010-04-22 16:47:31 -07001558 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00001559#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07001560
Avi Kivity5312bd82012-02-12 18:32:55 +02001561 section = &phys_sections[mmio->sub_section[idx]];
1562 addr += mmio->base;
1563 addr -= section->offset_within_address_space;
1564 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02001565 io_mem_write(section->mr, addr, value, len);
blueswir1db7b5422007-05-26 17:36:03 +00001566}
1567
Avi Kivity70c68e42012-01-02 12:32:48 +02001568static const MemoryRegionOps subpage_ops = {
1569 .read = subpage_read,
1570 .write = subpage_write,
1571 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00001572};
1573
Avi Kivitya8170e52012-10-23 12:30:10 +02001574static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
Avi Kivityde712f92012-01-02 12:41:07 +02001575 unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01001576{
1577 ram_addr_t raddr = addr;
1578 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02001579 switch (size) {
1580 case 1: return ldub_p(ptr);
1581 case 2: return lduw_p(ptr);
1582 case 4: return ldl_p(ptr);
1583 default: abort();
1584 }
Andreas Färber56384e82011-11-30 16:26:21 +01001585}
1586
Avi Kivitya8170e52012-10-23 12:30:10 +02001587static void subpage_ram_write(void *opaque, hwaddr addr,
Avi Kivityde712f92012-01-02 12:41:07 +02001588 uint64_t value, unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01001589{
1590 ram_addr_t raddr = addr;
1591 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02001592 switch (size) {
1593 case 1: return stb_p(ptr, value);
1594 case 2: return stw_p(ptr, value);
1595 case 4: return stl_p(ptr, value);
1596 default: abort();
1597 }
Andreas Färber56384e82011-11-30 16:26:21 +01001598}
1599
Avi Kivityde712f92012-01-02 12:41:07 +02001600static const MemoryRegionOps subpage_ram_ops = {
1601 .read = subpage_ram_read,
1602 .write = subpage_ram_write,
1603 .endianness = DEVICE_NATIVE_ENDIAN,
Andreas Färber56384e82011-11-30 16:26:21 +01001604};
1605
Anthony Liguoric227f092009-10-01 16:12:16 -05001606static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02001607 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00001608{
1609 int idx, eidx;
1610
1611 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1612 return -1;
1613 idx = SUBPAGE_IDX(start);
1614 eidx = SUBPAGE_IDX(end);
1615#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00001616 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", __func__,
blueswir1db7b5422007-05-26 17:36:03 +00001617 mmio, start, end, idx, eidx, section);
1618#endif
Avi Kivity5312bd82012-02-12 18:32:55 +02001619 if (memory_region_is_ram(phys_sections[section].mr)) {
1620 MemoryRegionSection new_section = phys_sections[section];
1621 new_section.mr = &io_mem_subpage_ram;
1622 section = phys_section_add(&new_section);
Andreas Färber56384e82011-11-30 16:26:21 +01001623 }
blueswir1db7b5422007-05-26 17:36:03 +00001624 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02001625 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00001626 }
1627
1628 return 0;
1629}
1630
Avi Kivitya8170e52012-10-23 12:30:10 +02001631static subpage_t *subpage_init(hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00001632{
Anthony Liguoric227f092009-10-01 16:12:16 -05001633 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00001634
Anthony Liguori7267c092011-08-20 22:09:37 -05001635 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00001636
1637 mmio->base = base;
Avi Kivity70c68e42012-01-02 12:32:48 +02001638 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
1639 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02001640 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00001641#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00001642 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1643 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00001644#endif
Avi Kivity0f0cb162012-02-13 17:14:32 +02001645 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
blueswir1db7b5422007-05-26 17:36:03 +00001646
1647 return mmio;
1648}
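
/* Hypothetical use of subpage_register() (sketch, assuming 4 KiB target
 * pages): split one guest page between two sections, e.g. a 2 KiB device
 * window followed by RAM for the rest of the page.  'sec_mmio' and
 * 'sec_ram' are assumed indices returned by phys_section_add(). */
static void example_split_page(subpage_t *mmio,
                               uint16_t sec_mmio, uint16_t sec_ram)
{
    subpage_register(mmio, 0, 0x7ff, sec_mmio);
    subpage_register(mmio, 0x800, TARGET_PAGE_SIZE - 1, sec_ram);
}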
1649
Avi Kivity5312bd82012-02-12 18:32:55 +02001650static uint16_t dummy_section(MemoryRegion *mr)
1651{
1652 MemoryRegionSection section = {
1653 .mr = mr,
1654 .offset_within_address_space = 0,
1655 .offset_within_region = 0,
1656 .size = UINT64_MAX,
1657 };
1658
1659 return phys_section_add(&section);
1660}
1661
Avi Kivitya8170e52012-10-23 12:30:10 +02001662MemoryRegion *iotlb_to_region(hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02001663{
Avi Kivity37ec01d2012-03-08 18:08:35 +02001664 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02001665}
1666
Avi Kivitye9179ce2009-06-14 11:38:52 +03001667static void io_mem_init(void)
1668{
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001669 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001670 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
1671 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
1672 "unassigned", UINT64_MAX);
1673 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
1674 "notdirty", UINT64_MAX);
Avi Kivityde712f92012-01-02 12:41:07 +02001675 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
1676 "subpage-ram", UINT64_MAX);
Avi Kivity1ec9b902012-01-02 12:47:48 +02001677 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
1678 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03001679}
1680
Avi Kivityac1970f2012-10-03 16:22:53 +02001681static void mem_begin(MemoryListener *listener)
1682{
1683 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
1684
1685 destroy_all_mappings(d);
1686 d->phys_map.ptr = PHYS_MAP_NODE_NIL;
1687}
1688
Avi Kivity50c1e142012-02-08 21:36:02 +02001689static void core_begin(MemoryListener *listener)
1690{
Avi Kivity5312bd82012-02-12 18:32:55 +02001691 phys_sections_clear();
1692 phys_section_unassigned = dummy_section(&io_mem_unassigned);
Avi Kivityaa102232012-03-08 17:06:55 +02001693 phys_section_notdirty = dummy_section(&io_mem_notdirty);
1694 phys_section_rom = dummy_section(&io_mem_rom);
1695 phys_section_watch = dummy_section(&io_mem_watch);
Avi Kivity50c1e142012-02-08 21:36:02 +02001696}
1697
Avi Kivity1d711482012-10-02 18:54:45 +02001698static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02001699{
Andreas Färber9349b4f2012-03-14 01:38:32 +01001700 CPUArchState *env;
Avi Kivity117712c2012-02-12 21:23:17 +02001701
1702 /* since each CPU stores ram addresses in its TLB cache, we must
1703 reset the modified entries */
1704 /* XXX: slow ! */
1705 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1706 tlb_flush(env, 1);
1707 }
Avi Kivity50c1e142012-02-08 21:36:02 +02001708}
1709
Avi Kivity93632742012-02-08 16:54:16 +02001710static void core_log_global_start(MemoryListener *listener)
1711{
1712 cpu_physical_memory_set_dirty_tracking(1);
1713}
1714
1715static void core_log_global_stop(MemoryListener *listener)
1716{
1717 cpu_physical_memory_set_dirty_tracking(0);
1718}
1719
Avi Kivity4855d412012-02-08 21:16:05 +02001720static void io_region_add(MemoryListener *listener,
1721 MemoryRegionSection *section)
1722{
Avi Kivitya2d33522012-03-05 17:40:12 +02001723 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
1724
1725 mrio->mr = section->mr;
1726 mrio->offset = section->offset_within_region;
1727 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
Avi Kivity4855d412012-02-08 21:16:05 +02001728 section->offset_within_address_space, section->size);
Avi Kivitya2d33522012-03-05 17:40:12 +02001729 ioport_register(&mrio->iorange);
Avi Kivity4855d412012-02-08 21:16:05 +02001730}
1731
1732static void io_region_del(MemoryListener *listener,
1733 MemoryRegionSection *section)
1734{
1735 isa_unassign_ioport(section->offset_within_address_space, section->size);
1736}
1737
Avi Kivity93632742012-02-08 16:54:16 +02001738static MemoryListener core_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02001739 .begin = core_begin,
Avi Kivity93632742012-02-08 16:54:16 +02001740 .log_global_start = core_log_global_start,
1741 .log_global_stop = core_log_global_stop,
Avi Kivityac1970f2012-10-03 16:22:53 +02001742 .priority = 1,
Avi Kivity93632742012-02-08 16:54:16 +02001743};
1744
Avi Kivity4855d412012-02-08 21:16:05 +02001745static MemoryListener io_memory_listener = {
1746 .region_add = io_region_add,
1747 .region_del = io_region_del,
Avi Kivity4855d412012-02-08 21:16:05 +02001748 .priority = 0,
1749};
1750
Avi Kivity1d711482012-10-02 18:54:45 +02001751static MemoryListener tcg_memory_listener = {
1752 .commit = tcg_commit,
1753};
1754
Avi Kivityac1970f2012-10-03 16:22:53 +02001755void address_space_init_dispatch(AddressSpace *as)
1756{
1757 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1758
1759 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1760 d->listener = (MemoryListener) {
1761 .begin = mem_begin,
1762 .region_add = mem_add,
1763 .region_nop = mem_add,
1764 .priority = 0,
1765 };
1766 as->dispatch = d;
1767 memory_listener_register(&d->listener, as);
1768}
1769
Avi Kivity83f3c252012-10-07 12:59:55 +02001770void address_space_destroy_dispatch(AddressSpace *as)
1771{
1772 AddressSpaceDispatch *d = as->dispatch;
1773
1774 memory_listener_unregister(&d->listener);
1775 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
1776 g_free(d);
1777 as->dispatch = NULL;
1778}
1779
Avi Kivity62152b82011-07-26 14:26:14 +03001780static void memory_map_init(void)
1781{
Anthony Liguori7267c092011-08-20 22:09:37 -05001782 system_memory = g_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03001783 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity2673a5d2012-10-02 18:49:28 +02001784 address_space_init(&address_space_memory, system_memory);
1785 address_space_memory.name = "memory";
Avi Kivity309cb472011-08-08 16:09:03 +03001786
Anthony Liguori7267c092011-08-20 22:09:37 -05001787 system_io = g_malloc(sizeof(*system_io));
Avi Kivity309cb472011-08-08 16:09:03 +03001788 memory_region_init(system_io, "io", 65536);
Avi Kivity2673a5d2012-10-02 18:49:28 +02001789 address_space_init(&address_space_io, system_io);
1790 address_space_io.name = "I/O";
Avi Kivity93632742012-02-08 16:54:16 +02001791
Avi Kivityf6790af2012-10-02 20:13:51 +02001792 memory_listener_register(&core_memory_listener, &address_space_memory);
1793 memory_listener_register(&io_memory_listener, &address_space_io);
1794 memory_listener_register(&tcg_memory_listener, &address_space_memory);
Peter Maydell9e119082012-10-29 11:34:32 +10001795
1796 dma_context_init(&dma_context_memory, &address_space_memory,
1797 NULL, NULL, NULL);
Avi Kivity62152b82011-07-26 14:26:14 +03001798}
1799
1800MemoryRegion *get_system_memory(void)
1801{
1802 return system_memory;
1803}
1804
Avi Kivity309cb472011-08-08 16:09:03 +03001805MemoryRegion *get_system_io(void)
1806{
1807 return system_io;
1808}
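
/* Board-side sketch (illustrative, not exec.c code): a machine model
 * allocates a MemoryRegion, backs it with RAM and maps it into the
 * address space returned by get_system_memory().  The region name and
 * size below are made up. */
static void example_map_board_ram(void)
{
    MemoryRegion *ram = g_malloc(sizeof(*ram));

    memory_region_init_ram(ram, "example.ram", 64 * 1024 * 1024);
    memory_region_add_subregion(get_system_memory(), 0, ram);
}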
1809
pbrooke2eef172008-06-08 01:09:01 +00001810#endif /* !defined(CONFIG_USER_ONLY) */
1811
bellard13eb76e2004-01-24 15:23:36 +00001812/* physical memory access (slow version, mainly for debug) */
1813#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01001814int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00001815 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00001816{
1817 int l, flags;
1818 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00001819 void * p;
bellard13eb76e2004-01-24 15:23:36 +00001820
1821 while (len > 0) {
1822 page = addr & TARGET_PAGE_MASK;
1823 l = (page + TARGET_PAGE_SIZE) - addr;
1824 if (l > len)
1825 l = len;
1826 flags = page_get_flags(page);
1827 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00001828 return -1;
bellard13eb76e2004-01-24 15:23:36 +00001829 if (is_write) {
1830 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00001831 return -1;
bellard579a97f2007-11-11 14:26:47 +00001832 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001833 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00001834 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001835 memcpy(p, buf, l);
1836 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00001837 } else {
1838 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00001839 return -1;
bellard579a97f2007-11-11 14:26:47 +00001840 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001841 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00001842 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001843 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00001844 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00001845 }
1846 len -= l;
1847 buf += l;
1848 addr += l;
1849 }
Paul Brooka68fe892010-03-01 00:08:59 +00001850 return 0;
bellard13eb76e2004-01-24 15:23:36 +00001851}
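
/* Usage sketch: this is the primitive the gdbstub builds on; a negative
 * return means part of the range was not accessible under the page
 * protections of 'env'. */
static int example_debugger_read(CPUArchState *env, target_ulong addr,
                                 uint8_t *buf, int len)
{
    return cpu_memory_rw_debug(env, addr, buf, len, 0);
}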
bellard8df1cd02005-01-28 22:37:22 +00001852
bellard13eb76e2004-01-24 15:23:36 +00001853#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001854
Avi Kivitya8170e52012-10-23 12:30:10 +02001855static void invalidate_and_set_dirty(hwaddr addr,
1856 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001857{
1858 if (!cpu_physical_memory_is_dirty(addr)) {
1859 /* invalidate code */
1860 tb_invalidate_phys_page_range(addr, addr + length, 0);
1861 /* set dirty bit */
1862 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1863 }
Anthony PERARDe2269392012-10-03 13:49:22 +00001864 xen_modified_memory(addr, length);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001865}
1866
Avi Kivitya8170e52012-10-23 12:30:10 +02001867void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02001868 int len, bool is_write)
bellard13eb76e2004-01-24 15:23:36 +00001869{
Avi Kivityac1970f2012-10-03 16:22:53 +02001870 AddressSpaceDispatch *d = as->dispatch;
Avi Kivity37ec01d2012-03-08 18:08:35 +02001871 int l;
bellard13eb76e2004-01-24 15:23:36 +00001872 uint8_t *ptr;
1873 uint32_t val;
Avi Kivitya8170e52012-10-23 12:30:10 +02001874 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02001875 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00001876
bellard13eb76e2004-01-24 15:23:36 +00001877 while (len > 0) {
1878 page = addr & TARGET_PAGE_MASK;
1879 l = (page + TARGET_PAGE_SIZE) - addr;
1880 if (l > len)
1881 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02001882 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00001883
bellard13eb76e2004-01-24 15:23:36 +00001884 if (is_write) {
Avi Kivityf3705d52012-03-08 16:16:34 +02001885 if (!memory_region_is_ram(section->mr)) {
Avi Kivitya8170e52012-10-23 12:30:10 +02001886 hwaddr addr1;
Blue Swirlcc5bea62012-04-14 14:56:48 +00001887 addr1 = memory_region_section_addr(section, addr);
bellard6a00d602005-11-21 23:25:50 +00001888 /* XXX: could force cpu_single_env to NULL to avoid
1889 potential bugs */
aurel326c2934d2009-02-18 21:37:17 +00001890 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00001891 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001892 val = ldl_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02001893 io_mem_write(section->mr, addr1, val, 4);
bellard13eb76e2004-01-24 15:23:36 +00001894 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00001895 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00001896 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001897 val = lduw_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02001898 io_mem_write(section->mr, addr1, val, 2);
bellard13eb76e2004-01-24 15:23:36 +00001899 l = 2;
1900 } else {
bellard1c213d12005-09-03 10:49:04 +00001901 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001902 val = ldub_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02001903 io_mem_write(section->mr, addr1, val, 1);
bellard13eb76e2004-01-24 15:23:36 +00001904 l = 1;
1905 }
Avi Kivityf3705d52012-03-08 16:16:34 +02001906 } else if (!section->readonly) {
Anthony PERARD8ca56922011-07-15 04:32:53 +00001907 ram_addr_t addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02001908 addr1 = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00001909 + memory_region_section_addr(section, addr);
bellard13eb76e2004-01-24 15:23:36 +00001910 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00001911 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00001912 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001913 invalidate_and_set_dirty(addr1, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01001914 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00001915 }
1916 } else {
Blue Swirlcc5bea62012-04-14 14:56:48 +00001917 if (!(memory_region_is_ram(section->mr) ||
1918 memory_region_is_romd(section->mr))) {
Avi Kivitya8170e52012-10-23 12:30:10 +02001919 hwaddr addr1;
bellard13eb76e2004-01-24 15:23:36 +00001920 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00001921 addr1 = memory_region_section_addr(section, addr);
aurel326c2934d2009-02-18 21:37:17 +00001922 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00001923 /* 32 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02001924 val = io_mem_read(section->mr, addr1, 4);
bellardc27004e2005-01-03 23:35:10 +00001925 stl_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00001926 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00001927 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00001928 /* 16 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02001929 val = io_mem_read(section->mr, addr1, 2);
bellardc27004e2005-01-03 23:35:10 +00001930 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00001931 l = 2;
1932 } else {
bellard1c213d12005-09-03 10:49:04 +00001933 /* 8 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02001934 val = io_mem_read(section->mr, addr1, 1);
bellardc27004e2005-01-03 23:35:10 +00001935 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00001936 l = 1;
1937 }
1938 } else {
1939 /* RAM case */
Anthony PERARD0a1b3572012-03-19 15:54:34 +00001940 ptr = qemu_get_ram_ptr(section->mr->ram_addr
Blue Swirlcc5bea62012-04-14 14:56:48 +00001941 + memory_region_section_addr(section,
1942 addr));
Avi Kivityf3705d52012-03-08 16:16:34 +02001943 memcpy(buf, ptr, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01001944 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00001945 }
1946 }
1947 len -= l;
1948 buf += l;
1949 addr += l;
1950 }
1951}
bellard8df1cd02005-01-28 22:37:22 +00001952
Avi Kivitya8170e52012-10-23 12:30:10 +02001953void address_space_write(AddressSpace *as, hwaddr addr,
Avi Kivityac1970f2012-10-03 16:22:53 +02001954 const uint8_t *buf, int len)
1955{
1956 address_space_rw(as, addr, (uint8_t *)buf, len, true);
1957}
1958
1959/**
1960 * address_space_read: read from an address space.
1961 *
1962 * @as: #AddressSpace to be accessed
1963 * @addr: address within that address space
1964 * @buf: buffer with the data transferred
1965 */
Avi Kivitya8170e52012-10-23 12:30:10 +02001966void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02001967{
1968 address_space_rw(as, addr, buf, len, false);
1969}
1970
1971
Avi Kivitya8170e52012-10-23 12:30:10 +02001972void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02001973 int len, int is_write)
1974{
1975 return address_space_rw(&address_space_memory, addr, buf, len, is_write);
1976}
1977
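/* Usage sketch (structure and names are assumptions): device models
 * typically pull small control blocks out of guest memory with the
 * cpu_physical_memory_read/write wrappers built on the routine above. */
typedef struct {
    uint32_t buf_addr;
    uint32_t buf_len;
} ExampleGuestDesc;

static void example_fetch_desc(hwaddr desc_pa, ExampleGuestDesc *d)
{
    cpu_physical_memory_read(desc_pa, (uint8_t *)d, sizeof(*d));
}
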
bellardd0ecd2a2006-04-23 17:14:48 +00001978/* used for ROM loading : can write in RAM and ROM */
Avi Kivitya8170e52012-10-23 12:30:10 +02001979void cpu_physical_memory_write_rom(hwaddr addr,
bellardd0ecd2a2006-04-23 17:14:48 +00001980 const uint8_t *buf, int len)
1981{
Avi Kivityac1970f2012-10-03 16:22:53 +02001982 AddressSpaceDispatch *d = address_space_memory.dispatch;
bellardd0ecd2a2006-04-23 17:14:48 +00001983 int l;
1984 uint8_t *ptr;
Avi Kivitya8170e52012-10-23 12:30:10 +02001985 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02001986 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00001987
bellardd0ecd2a2006-04-23 17:14:48 +00001988 while (len > 0) {
1989 page = addr & TARGET_PAGE_MASK;
1990 l = (page + TARGET_PAGE_SIZE) - addr;
1991 if (l > len)
1992 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02001993 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00001994
Blue Swirlcc5bea62012-04-14 14:56:48 +00001995 if (!(memory_region_is_ram(section->mr) ||
1996 memory_region_is_romd(section->mr))) {
bellardd0ecd2a2006-04-23 17:14:48 +00001997 /* do nothing */
1998 } else {
1999 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02002000 addr1 = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002001 + memory_region_section_addr(section, addr);
bellardd0ecd2a2006-04-23 17:14:48 +00002002 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002003 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002004 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002005 invalidate_and_set_dirty(addr1, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002006 qemu_put_ram_ptr(ptr);
bellardd0ecd2a2006-04-23 17:14:48 +00002007 }
2008 len -= l;
2009 buf += l;
2010 addr += l;
2011 }
2012}
2013
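/* Sketch: firmware loaders call cpu_physical_memory_write_rom() instead
 * of the normal write path so the bytes land even in regions marked
 * read-only.  'blob' and 'size' are assumed inputs. */
static void example_install_firmware(hwaddr rom_base,
                                     const uint8_t *blob, int size)
{
    cpu_physical_memory_write_rom(rom_base, blob, size);
}
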
aliguori6d16c2f2009-01-22 16:59:11 +00002014typedef struct {
2015 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002016 hwaddr addr;
2017 hwaddr len;
aliguori6d16c2f2009-01-22 16:59:11 +00002018} BounceBuffer;
2019
2020static BounceBuffer bounce;
2021
aliguoriba223c22009-01-22 16:59:16 +00002022typedef struct MapClient {
2023 void *opaque;
2024 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00002025 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002026} MapClient;
2027
Blue Swirl72cf2d42009-09-12 07:36:22 +00002028static QLIST_HEAD(map_client_list, MapClient) map_client_list
2029 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002030
2031void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2032{
Anthony Liguori7267c092011-08-20 22:09:37 -05002033 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00002034
2035 client->opaque = opaque;
2036 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002037 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00002038 return client;
2039}
2040
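/* Sketch of the retry protocol (callback and opaque are made up): when a
 * mapping attempt fails because the bounce buffer is busy, a device can
 * park a callback that fires once the buffer is released. */
static void example_dma_retry_cb(void *opaque)
{
    /* re-issue the cpu_physical_memory_map() call that returned NULL */
}

static void example_park_dma_retry(void *dev)
{
    cpu_register_map_client(dev, example_dma_retry_cb);
}
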
Blue Swirl8b9c99d2012-10-28 11:04:51 +00002041static void cpu_unregister_map_client(void *_client)
aliguoriba223c22009-01-22 16:59:16 +00002042{
2043 MapClient *client = (MapClient *)_client;
2044
Blue Swirl72cf2d42009-09-12 07:36:22 +00002045 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002046 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002047}
2048
2049static void cpu_notify_map_clients(void)
2050{
2051 MapClient *client;
2052
Blue Swirl72cf2d42009-09-12 07:36:22 +00002053 while (!QLIST_EMPTY(&map_client_list)) {
2054 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002055 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09002056 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00002057 }
2058}
2059
aliguori6d16c2f2009-01-22 16:59:11 +00002060/* Map a physical memory region into a host virtual address.
2061 * May map a subset of the requested range, given by and returned in *plen.
2062 * May return NULL if resources needed to perform the mapping are exhausted.
2063 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002064 * Use cpu_register_map_client() to know when retrying the map operation is
2065 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002066 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002067void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002068 hwaddr addr,
2069 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002070 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002071{
Avi Kivityac1970f2012-10-03 16:22:53 +02002072 AddressSpaceDispatch *d = as->dispatch;
Avi Kivitya8170e52012-10-23 12:30:10 +02002073 hwaddr len = *plen;
2074 hwaddr todo = 0;
aliguori6d16c2f2009-01-22 16:59:11 +00002075 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02002076 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02002077 MemoryRegionSection *section;
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002078 ram_addr_t raddr = RAM_ADDR_MAX;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002079 ram_addr_t rlen;
2080 void *ret;
aliguori6d16c2f2009-01-22 16:59:11 +00002081
2082 while (len > 0) {
2083 page = addr & TARGET_PAGE_MASK;
2084 l = (page + TARGET_PAGE_SIZE) - addr;
2085 if (l > len)
2086 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02002087 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
aliguori6d16c2f2009-01-22 16:59:11 +00002088
Avi Kivityf3705d52012-03-08 16:16:34 +02002089 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002090 if (todo || bounce.buffer) {
aliguori6d16c2f2009-01-22 16:59:11 +00002091 break;
2092 }
2093 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2094 bounce.addr = addr;
2095 bounce.len = l;
2096 if (!is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002097 address_space_read(as, addr, bounce.buffer, l);
aliguori6d16c2f2009-01-22 16:59:11 +00002098 }
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002099
2100 *plen = l;
2101 return bounce.buffer;
aliguori6d16c2f2009-01-22 16:59:11 +00002102 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002103 if (!todo) {
Avi Kivityf3705d52012-03-08 16:16:34 +02002104 raddr = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002105 + memory_region_section_addr(section, addr);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002106 }
aliguori6d16c2f2009-01-22 16:59:11 +00002107
2108 len -= l;
2109 addr += l;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002110 todo += l;
aliguori6d16c2f2009-01-22 16:59:11 +00002111 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002112 rlen = todo;
2113 ret = qemu_ram_ptr_length(raddr, &rlen);
2114 *plen = rlen;
2115 return ret;
aliguori6d16c2f2009-01-22 16:59:11 +00002116}
2117
Avi Kivityac1970f2012-10-03 16:22:53 +02002118/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002119 * Will also mark the memory as dirty if is_write == 1. access_len gives
2120 * the amount of memory that was actually read or written by the caller.
2121 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002122void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2123 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002124{
2125 if (buffer != bounce.buffer) {
2126 if (is_write) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03002127 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002128 while (access_len) {
2129 unsigned l;
2130 l = TARGET_PAGE_SIZE;
2131 if (l > access_len)
2132 l = access_len;
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002133 invalidate_and_set_dirty(addr1, l);
aliguori6d16c2f2009-01-22 16:59:11 +00002134 addr1 += l;
2135 access_len -= l;
2136 }
2137 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002138 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002139 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002140 }
aliguori6d16c2f2009-01-22 16:59:11 +00002141 return;
2142 }
2143 if (is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002144 address_space_write(as, bounce.addr, bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002145 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002146 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002147 bounce.buffer = NULL;
aliguoriba223c22009-01-22 16:59:16 +00002148 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002149}
bellardd0ecd2a2006-04-23 17:14:48 +00002150
Avi Kivitya8170e52012-10-23 12:30:10 +02002151void *cpu_physical_memory_map(hwaddr addr,
2152 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002153 int is_write)
2154{
2155 return address_space_map(&address_space_memory, addr, plen, is_write);
2156}
2157
Avi Kivitya8170e52012-10-23 12:30:10 +02002158void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2159 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002160{
2161 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2162}
2163
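/* End-to-end sketch of the zero-copy pattern (illustrative; names are
 * assumptions): map, copy, then unmap with the length actually used.  A
 * NULL return means mapping resources are exhausted; see
 * cpu_register_map_client() above for the retry hook. */
static bool example_dma_write(hwaddr addr, const uint8_t *data, hwaddr len)
{
    hwaddr plen = len;
    void *host = cpu_physical_memory_map(addr, &plen, 1);

    if (host == NULL) {
        return false;
    }
    memcpy(host, data, plen);               /* plen may have been reduced */
    cpu_physical_memory_unmap(host, plen, 1, plen);
    return plen == len;
}
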
bellard8df1cd02005-01-28 22:37:22 +00002164/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002165static inline uint32_t ldl_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002166 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002167{
bellard8df1cd02005-01-28 22:37:22 +00002168 uint8_t *ptr;
2169 uint32_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02002170 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00002171
Avi Kivityac1970f2012-10-03 16:22:53 +02002172 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002173
Blue Swirlcc5bea62012-04-14 14:56:48 +00002174 if (!(memory_region_is_ram(section->mr) ||
2175 memory_region_is_romd(section->mr))) {
bellard8df1cd02005-01-28 22:37:22 +00002176 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00002177 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02002178 val = io_mem_read(section->mr, addr, 4);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002179#if defined(TARGET_WORDS_BIGENDIAN)
2180 if (endian == DEVICE_LITTLE_ENDIAN) {
2181 val = bswap32(val);
2182 }
2183#else
2184 if (endian == DEVICE_BIG_ENDIAN) {
2185 val = bswap32(val);
2186 }
2187#endif
bellard8df1cd02005-01-28 22:37:22 +00002188 } else {
2189 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02002190 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002191 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002192 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002193 switch (endian) {
2194 case DEVICE_LITTLE_ENDIAN:
2195 val = ldl_le_p(ptr);
2196 break;
2197 case DEVICE_BIG_ENDIAN:
2198 val = ldl_be_p(ptr);
2199 break;
2200 default:
2201 val = ldl_p(ptr);
2202 break;
2203 }
bellard8df1cd02005-01-28 22:37:22 +00002204 }
2205 return val;
2206}
2207
Avi Kivitya8170e52012-10-23 12:30:10 +02002208uint32_t ldl_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002209{
2210 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2211}
2212
Avi Kivitya8170e52012-10-23 12:30:10 +02002213uint32_t ldl_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002214{
2215 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2216}
2217
Avi Kivitya8170e52012-10-23 12:30:10 +02002218uint32_t ldl_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002219{
2220 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2221}
2222
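/* Illustration (sketch): the _le/_be variants return host-order values
 * regardless of TARGET_WORDS_BIGENDIAN, so reading a little-endian
 * device register at an assumed physical address is simply: */
static uint32_t example_read_le_reg(hwaddr reg_pa)
{
    return ldl_le_phys(reg_pa);
}
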
bellard84b7b8e2005-11-28 21:19:04 +00002223/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002224static inline uint64_t ldq_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002225 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00002226{
bellard84b7b8e2005-11-28 21:19:04 +00002227 uint8_t *ptr;
2228 uint64_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02002229 MemoryRegionSection *section;
bellard84b7b8e2005-11-28 21:19:04 +00002230
Avi Kivityac1970f2012-10-03 16:22:53 +02002231 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002232
Blue Swirlcc5bea62012-04-14 14:56:48 +00002233 if (!(memory_region_is_ram(section->mr) ||
2234 memory_region_is_romd(section->mr))) {
bellard84b7b8e2005-11-28 21:19:04 +00002235 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00002236 addr = memory_region_section_addr(section, addr);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002237
2238 /* XXX This is broken when device endian != cpu endian.
2239 Fix and add "endian" variable check */
bellard84b7b8e2005-11-28 21:19:04 +00002240#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivity37ec01d2012-03-08 18:08:35 +02002241 val = io_mem_read(section->mr, addr, 4) << 32;
2242 val |= io_mem_read(section->mr, addr + 4, 4);
bellard84b7b8e2005-11-28 21:19:04 +00002243#else
Avi Kivity37ec01d2012-03-08 18:08:35 +02002244 val = io_mem_read(section->mr, addr, 4);
2245 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
bellard84b7b8e2005-11-28 21:19:04 +00002246#endif
2247 } else {
2248 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02002249 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002250 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002251 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002252 switch (endian) {
2253 case DEVICE_LITTLE_ENDIAN:
2254 val = ldq_le_p(ptr);
2255 break;
2256 case DEVICE_BIG_ENDIAN:
2257 val = ldq_be_p(ptr);
2258 break;
2259 default:
2260 val = ldq_p(ptr);
2261 break;
2262 }
bellard84b7b8e2005-11-28 21:19:04 +00002263 }
2264 return val;
2265}
2266
Avi Kivitya8170e52012-10-23 12:30:10 +02002267uint64_t ldq_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002268{
2269 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2270}
2271
Avi Kivitya8170e52012-10-23 12:30:10 +02002272uint64_t ldq_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002273{
2274 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2275}
2276
Avi Kivitya8170e52012-10-23 12:30:10 +02002277uint64_t ldq_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002278{
2279 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2280}
2281
bellardaab33092005-10-30 20:48:42 +00002282/* XXX: optimize */
Avi Kivitya8170e52012-10-23 12:30:10 +02002283uint32_t ldub_phys(hwaddr addr)
bellardaab33092005-10-30 20:48:42 +00002284{
2285 uint8_t val;
2286 cpu_physical_memory_read(addr, &val, 1);
2287 return val;
2288}
2289
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002290/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002291static inline uint32_t lduw_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002292 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00002293{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002294 uint8_t *ptr;
2295 uint64_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02002296 MemoryRegionSection *section;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002297
Avi Kivityac1970f2012-10-03 16:22:53 +02002298 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002299
Blue Swirlcc5bea62012-04-14 14:56:48 +00002300 if (!(memory_region_is_ram(section->mr) ||
2301 memory_region_is_romd(section->mr))) {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002302 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00002303 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02002304 val = io_mem_read(section->mr, addr, 2);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002305#if defined(TARGET_WORDS_BIGENDIAN)
2306 if (endian == DEVICE_LITTLE_ENDIAN) {
2307 val = bswap16(val);
2308 }
2309#else
2310 if (endian == DEVICE_BIG_ENDIAN) {
2311 val = bswap16(val);
2312 }
2313#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002314 } else {
2315 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02002316 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002317 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002318 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002319 switch (endian) {
2320 case DEVICE_LITTLE_ENDIAN:
2321 val = lduw_le_p(ptr);
2322 break;
2323 case DEVICE_BIG_ENDIAN:
2324 val = lduw_be_p(ptr);
2325 break;
2326 default:
2327 val = lduw_p(ptr);
2328 break;
2329 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002330 }
2331 return val;
bellardaab33092005-10-30 20:48:42 +00002332}
2333
Avi Kivitya8170e52012-10-23 12:30:10 +02002334uint32_t lduw_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002335{
2336 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2337}
2338
Avi Kivitya8170e52012-10-23 12:30:10 +02002339uint32_t lduw_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002340{
2341 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2342}
2343
Avi Kivitya8170e52012-10-23 12:30:10 +02002344uint32_t lduw_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002345{
2346 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2347}
2348
bellard8df1cd02005-01-28 22:37:22 +00002349/* warning: addr must be aligned. The ram page is not marked as dirty
2350 and the code inside is not invalidated. It is useful if the dirty
2351 bits are used to track modified PTEs */
Avi Kivitya8170e52012-10-23 12:30:10 +02002352void stl_phys_notdirty(hwaddr addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00002353{
bellard8df1cd02005-01-28 22:37:22 +00002354 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02002355 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00002356
Avi Kivityac1970f2012-10-03 16:22:53 +02002357 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002358
Avi Kivityf3705d52012-03-08 16:16:34 +02002359 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00002360 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02002361 if (memory_region_is_ram(section->mr)) {
2362 section = &phys_sections[phys_section_rom];
2363 }
2364 io_mem_write(section->mr, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00002365 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02002366 unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002367 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002368 + memory_region_section_addr(section, addr);
pbrook5579c7f2009-04-11 14:47:08 +00002369 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00002370 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00002371
2372 if (unlikely(in_migration)) {
2373 if (!cpu_physical_memory_is_dirty(addr1)) {
2374 /* invalidate code */
2375 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2376 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002377 cpu_physical_memory_set_dirty_flags(
2378 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori74576192008-10-06 14:02:03 +00002379 }
2380 }
bellard8df1cd02005-01-28 22:37:22 +00002381 }
2382}
2383
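/* 64-bit variant of the above. On the I/O path the value is split into
   two 4-byte writes, with the high and low halves ordered to match the
   target's byte order. */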
void stq_phys_notdirty(hwaddr addr, uint64_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(section->mr, addr, val >> 32, 4);
        io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
#else
        io_mem_write(section->mr, addr, (uint32_t)val, 4);
        io_mem_write(section->mr, addr + 4, val >> 32, 4);
#endif
    } else {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        stq_p(ptr, val);
    }
}

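/* Common 32-bit physical-memory store, the store-side counterpart of
   lduw_phys_internal() above: anything that is not plain writable RAM
   goes through io_mem_write(), with ROM redirected to the dedicated rom
   section so the write is discarded; RAM is written through the host
   pointer and then passed to invalidate_and_set_dirty() so any cached
   translations of the modified range are thrown away. */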
/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stb_phys(hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

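/* 16-bit store: same structure as stl_phys_internal() above, with
   2-byte accesses and bswap16() in place of the 32-bit operations. */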
/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

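/* The three 64-bit stores below take the simple buffer route (hence the
   XXX): the value is byte-swapped in a local to target, little or big
   endian order respectively, then handed to cpu_physical_memory_write()
   as raw bytes. */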
/* XXX: optimize */
void stq_phys(hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

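/* Debugger access to guest virtual memory: the range is walked one
   target page at a time, each page translated with
   cpu_get_phys_page_debug(), and the data moved with the physical
   memory accessors. Writes take the ROM-capable path so a debugger can
   patch read-only memory, e.g. to plant a breakpoint. */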
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif

#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif

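/* Returns true if the given physical address is backed by neither RAM
   nor ROMD, i.e. an access would be dispatched to a device (or hit
   unassigned memory) rather than touch guest memory directly. */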
#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             phys_addr >> TARGET_PAGE_BITS);

    return !(memory_region_is_ram(section->mr) ||
             memory_region_is_romd(section->mr));
}
#endif