/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_UNASSIGNED
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;
DMAContext dma_context_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned, io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
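
/* The structure built above is a simple radix tree: each node has L2_SIZE
 * entries, each level consumes L2_BITS of the page index, and a leaf holds
 * a 16-bit index into phys_sections[].  As an illustrative sketch (the
 * page index is hypothetical), registering a two-page section would be:
 *
 *     phys_page_set(d, 0x12345, 2, section_index);
 *
 * A run of pages that is step-aligned and covers a whole subtree is stored
 * as a single leaf at the higher level, which keeps the tree compact.
 */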

MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &phys_sections[phys_section_unassigned];
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }
    return &phys_sections[lp.ptr];
}
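
/* Lookup mirrors phys_page_set(): walk one node per level until a leaf is
 * reached, falling back to the "unassigned" section when the walk runs off
 * the populated part of the tree.  A minimal sketch of a caller, assuming
 * a valid AddressSpaceDispatch *d:
 *
 *     MemoryRegionSection *s = phys_page_find(d, addr >> TARGET_PAGE_BITS);
 *     if (memory_region_is_ram(s->mr)) {
 *         ... take the fast RAM path ...
 *     }
 */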

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#else
#define vmstate_cpu_common vmstate_dummy
#endif

CPUState *qemu_get_cpu(int index)
{
    CPUArchState *env = first_cpu;
    CPUState *cpu = NULL;

    while (env) {
        cpu = ENV_GET_CPU(env);
        if (cpu->cpu_index == index) {
            break;
        }
        env = env->next_cpu;
    }

    return env ? cpu : NULL;
}

void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data)
{
    CPUArchState *env = first_cpu;

    while (env) {
        func(ENV_GET_CPU(env), data);
        env = env->next_cpu;
    }
}
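
/* A minimal usage sketch for the iterator above (the callback name is
 * only an example):
 *
 *     static void count_cpu(CPUState *cpu, void *data)
 *     {
 *         (*(int *)data)++;
 *     }
 *
 *     int n = 0;
 *     qemu_for_each_cpu(count_cpu, &n);
 */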

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
                            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint. */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
        len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
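
/* Worked example of the length check above: len must be a power of two no
 * larger than a page, and addr must be aligned to it.  For len == 4,
 * len_mask == ~3, so addr == 0x1003 is rejected (addr & ~len_mask != 0)
 * while addr == 0x1004 is accepted and watches bytes 0x1004..0x1007.
 * The addresses are illustrative only.
 */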

/* Remove a specific watchpoint. */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
            && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference. */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints. */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint. */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint. */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference. */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

void cpu_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->exit_request = 1;
    cpu->tcg_exit_req = 1;
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining. */
    new_env->next_cpu = next_cpu;

    /* Clone all break/watchpoints into the new CPU, not the source one.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&new_env->breakpoints);
    QTAILQ_INIT(&new_env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below. */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
        != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}
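
/* Dirty tracking ties the bitmap and the TLB together: clearing dirty bits
 * alone is not enough, because a TLB entry that still allows fast writes
 * would skip the bookkeeping.  tlb_reset_dirty_range_all() therefore remaps
 * the range so the next guest write faults and sets the bits again.  A
 * sketch of the expected calling pattern (the flag is illustrative):
 *
 *     cpu_physical_memory_reset_dirty(start, start + len, VGA_DIRTY_FLAG);
 */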

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM. */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, paddr);
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        iotlb = section - phys_sections;
        iotlb += memory_region_section_addr(section, paddr);
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines. */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
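
/* The iotlb value is deliberately overloaded: for RAM it is the page-aligned
 * ram_addr_t of the page ORed with a small section number (notdirty or rom);
 * for MMIO it is the index of the section itself.  This is why
 * phys_section_add() below asserts that section numbers stay below
 * TARGET_PAGE_SIZE: the number must fit in the low bits of a page-aligned
 * address without corrupting it.
 */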
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;
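
/* A subpage covers one target page whose contents are not homogeneous:
 * sub_section[] maps each byte offset within the page to a section number,
 * so a single page can be split between, say, RAM and a device region.
 * SUBPAGE_IDX() extracts that offset; with 4 KiB pages an access at
 * base + 0x123 is dispatched through sub_section[0x123] (offsets are
 * illustrative).
 */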

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(hwaddr base);
static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(AddressSpaceDispatch *d)
{
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(phys_sections_nb < TARGET_PAGE_SIZE);

    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}

static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    hwaddr addr;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    addr = start_addr;
    phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}

QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > MAX_PHYS_ADDR_SPACE_BITS)

static MemoryRegionSection limit(MemoryRegionSection section)
{
    section.size = MIN(section.offset_within_address_space + section.size,
                       MAX_PHYS_ADDR + 1)
                   - section.offset_within_address_space;

    return section;
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
    MemoryRegionSection now = limit(*section), remain = limit(*section);

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(d, &now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    while (remain.size >= TARGET_PAGE_SIZE) {
        now = remain;
        if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = TARGET_PAGE_SIZE;
            register_subpage(d, &now);
        } else {
            now.size &= TARGET_PAGE_MASK;
            register_multipage(d, &now);
        }
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(d, &now);
    }
}
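
/* Worked example for mem_add() (addresses illustrative, 4 KiB pages): a
 * section at 0x1000000 of size 0x2800 has no unaligned head, so the loop
 * registers the two full pages 0x1000000..0x1001fff with a single
 * register_multipage() call, and the remaining 0x800-byte tail at
 * 0x1002000 becomes a subpage.  Runs whose offset within their
 * MemoryRegion is not page-aligned fall back to page-by-page subpage
 * registration instead.
 */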

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif

static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
Juan Quintela652d7ec2012-07-20 10:37:54 +0200974ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -0600975{
Alex Williamsond17b5282010-06-25 11:08:38 -0600976 RAMBlock *block;
977 ram_addr_t last = 0;
978
Paolo Bonzinia3161032012-11-14 15:54:48 +0100979 QTAILQ_FOREACH(block, &ram_list.blocks, next)
Alex Williamsond17b5282010-06-25 11:08:38 -0600980 last = MAX(last, block->offset + block->length);
981
982 return last;
983}
984
Jason Baronddb97f12012-08-02 15:44:16 -0400985static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
986{
987 int ret;
988 QemuOpts *machine_opts;
989
990 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
991 machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
992 if (machine_opts &&
993 !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
994 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
995 if (ret) {
996 perror("qemu_madvise");
997 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
998 "but dump_guest_core=off specified\n");
999 }
1000 }
1001}
1002
Avi Kivityc5705a72011-12-20 15:59:12 +02001003void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
Cam Macdonell84b89d72010-07-26 18:10:57 -06001004{
1005 RAMBlock *new_block, *block;
1006
Avi Kivityc5705a72011-12-20 15:59:12 +02001007 new_block = NULL;
Paolo Bonzinia3161032012-11-14 15:54:48 +01001008 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001009 if (block->offset == addr) {
1010 new_block = block;
1011 break;
1012 }
1013 }
1014 assert(new_block);
1015 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001016
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001017 if (dev) {
1018 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001019 if (id) {
1020 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001021 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001022 }
1023 }
1024 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1025
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001026 /* This assumes the iothread lock is taken here too. */
1027 qemu_mutex_lock_ramlist();
Paolo Bonzinia3161032012-11-14 15:54:48 +01001028 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001029 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001030 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1031 new_block->idstr);
1032 abort();
1033 }
1034 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001035 qemu_mutex_unlock_ramlist();
Avi Kivityc5705a72011-12-20 15:59:12 +02001036}
1037
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001038static int memory_try_enable_merging(void *addr, size_t len)
1039{
1040 QemuOpts *opts;
1041
1042 opts = qemu_opts_find(qemu_find_opts("machine"), 0);
1043 if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
1044 /* disabled by the user */
1045 return 0;
1046 }
1047
1048 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1049}
1050
Avi Kivityc5705a72011-12-20 15:59:12 +02001051ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1052 MemoryRegion *mr)
1053{
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001054 RAMBlock *block, *new_block;
Avi Kivityc5705a72011-12-20 15:59:12 +02001055
1056 size = TARGET_PAGE_ALIGN(size);
1057 new_block = g_malloc0(sizeof(*new_block));
Cam Macdonell84b89d72010-07-26 18:10:57 -06001058
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001059 /* This assumes the iothread lock is taken here too. */
1060 qemu_mutex_lock_ramlist();
Avi Kivity7c637362011-12-21 13:09:49 +02001061 new_block->mr = mr;
Jun Nakajima432d2682010-08-31 16:41:25 +01001062 new_block->offset = find_ram_offset(size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001063 if (host) {
1064 new_block->host = host;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001065 new_block->flags |= RAM_PREALLOC_MASK;
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001066 } else {
1067 if (mem_path) {
1068#if defined (__linux__) && !defined(TARGET_S390X)
1069 new_block->host = file_ram_alloc(new_block, size, mem_path);
1070 if (!new_block->host) {
Paolo Bonzini6eebf952013-05-13 16:19:55 +02001071 new_block->host = qemu_anon_ram_alloc(size);
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001072 memory_try_enable_merging(new_block->host, size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001073 }
1074#else
1075 fprintf(stderr, "-mem-path option unsupported\n");
1076 exit(1);
1077#endif
1078 } else {
Jan Kiszka868bb332011-06-21 22:59:09 +02001079 if (xen_enabled()) {
Avi Kivityfce537d2011-12-18 15:48:55 +02001080 xen_ram_alloc(new_block->offset, size, mr);
Christian Borntraegerfdec9912012-06-15 05:10:30 +00001081 } else if (kvm_enabled()) {
1082 /* some s390/kvm configurations have special constraints */
Paolo Bonzini6eebf952013-05-13 16:19:55 +02001083 new_block->host = kvm_ram_alloc(size);
Jun Nakajima432d2682010-08-31 16:41:25 +01001084 } else {
Paolo Bonzini6eebf952013-05-13 16:19:55 +02001085 new_block->host = qemu_anon_ram_alloc(size);
Jun Nakajima432d2682010-08-31 16:41:25 +01001086 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001087 memory_try_enable_merging(new_block->host, size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001088 }
1089 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001090 new_block->length = size;
1091
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001092 /* Keep the list sorted from biggest to smallest block. */
1093 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1094 if (block->length < new_block->length) {
1095 break;
1096 }
1097 }
1098 if (block) {
1099 QTAILQ_INSERT_BEFORE(block, new_block, next);
1100 } else {
1101 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1102 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001103 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001104
Umesh Deshpandef798b072011-08-18 11:41:17 -07001105 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001106 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001107
Anthony Liguori7267c092011-08-20 22:09:37 -05001108 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
Cam Macdonell84b89d72010-07-26 18:10:57 -06001109 last_ram_offset() >> TARGET_PAGE_BITS);
Igor Mitsyanko5fda0432012-08-10 18:45:11 +04001110 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
1111 0, size >> TARGET_PAGE_BITS);
Juan Quintela1720aee2012-06-22 13:14:17 +02001112 cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001113
Jason Baronddb97f12012-08-02 15:44:16 -04001114 qemu_ram_setup_dump(new_block->host, size);
Luiz Capitulinoad0b5322012-10-05 16:47:57 -03001115 qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
Jason Baronddb97f12012-08-02 15:44:16 -04001116
Cam Macdonell84b89d72010-07-26 18:10:57 -06001117 if (kvm_enabled())
1118 kvm_setup_guest_memory(new_block->host, size);
1119
1120 return new_block->offset;
1121}
1122
Avi Kivityc5705a72011-12-20 15:59:12 +02001123ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
pbrook94a6b542009-04-11 17:15:54 +00001124{
Avi Kivityc5705a72011-12-20 15:59:12 +02001125 return qemu_ram_alloc_from_ptr(size, NULL, mr);
pbrook94a6b542009-04-11 17:15:54 +00001126}
bellarde9a1ab12007-02-08 23:08:38 +00001127
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001128void qemu_ram_free_from_ptr(ram_addr_t addr)
1129{
1130 RAMBlock *block;
1131
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001132 /* This assumes the iothread lock is taken here too. */
1133 qemu_mutex_lock_ramlist();
Paolo Bonzinia3161032012-11-14 15:54:48 +01001134 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001135 if (addr == block->offset) {
Paolo Bonzinia3161032012-11-14 15:54:48 +01001136 QTAILQ_REMOVE(&ram_list.blocks, block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001137 ram_list.mru_block = NULL;
Umesh Deshpandef798b072011-08-18 11:41:17 -07001138 ram_list.version++;
Anthony Liguori7267c092011-08-20 22:09:37 -05001139 g_free(block);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001140 break;
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001141 }
1142 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001143 qemu_mutex_unlock_ramlist();
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001144}
1145
Anthony Liguoric227f092009-10-01 16:12:16 -05001146void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00001147{
Alex Williamson04b16652010-07-02 11:13:17 -06001148 RAMBlock *block;
1149
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001150 /* This assumes the iothread lock is taken here too. */
1151 qemu_mutex_lock_ramlist();
Paolo Bonzinia3161032012-11-14 15:54:48 +01001152 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001153 if (addr == block->offset) {
Paolo Bonzinia3161032012-11-14 15:54:48 +01001154 QTAILQ_REMOVE(&ram_list.blocks, block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001155 ram_list.mru_block = NULL;
Umesh Deshpandef798b072011-08-18 11:41:17 -07001156 ram_list.version++;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001157 if (block->flags & RAM_PREALLOC_MASK) {
1158 ;
1159 } else if (mem_path) {
Alex Williamson04b16652010-07-02 11:13:17 -06001160#if defined (__linux__) && !defined(TARGET_S390X)
1161 if (block->fd) {
1162 munmap(block->host, block->length);
1163 close(block->fd);
1164 } else {
Paolo Bonzinie7a09b92013-05-13 16:19:56 +02001165 qemu_anon_ram_free(block->host, block->length);
Alex Williamson04b16652010-07-02 11:13:17 -06001166 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01001167#else
1168 abort();
Alex Williamson04b16652010-07-02 11:13:17 -06001169#endif
1170 } else {
Jan Kiszka868bb332011-06-21 22:59:09 +02001171 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001172 xen_invalidate_map_cache_entry(block->host);
Jun Nakajima432d2682010-08-31 16:41:25 +01001173 } else {
Paolo Bonzinie7a09b92013-05-13 16:19:56 +02001174 qemu_anon_ram_free(block->host, block->length);
Jun Nakajima432d2682010-08-31 16:41:25 +01001175 }
Alex Williamson04b16652010-07-02 11:13:17 -06001176 }
Anthony Liguori7267c092011-08-20 22:09:37 -05001177 g_free(block);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001178 break;
Alex Williamson04b16652010-07-02 11:13:17 -06001179 }
1180 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001181 qemu_mutex_unlock_ramlist();
Alex Williamson04b16652010-07-02 11:13:17 -06001182
bellarde9a1ab12007-02-08 23:08:38 +00001183}
1184
Huang Yingcd19cfa2011-03-02 08:56:19 +01001185#ifndef _WIN32
1186void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1187{
1188 RAMBlock *block;
1189 ram_addr_t offset;
1190 int flags;
1191 void *area, *vaddr;
1192
Paolo Bonzinia3161032012-11-14 15:54:48 +01001193 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001194 offset = addr - block->offset;
1195 if (offset < block->length) {
1196 vaddr = block->host + offset;
1197 if (block->flags & RAM_PREALLOC_MASK) {
1198 ;
1199 } else {
1200 flags = MAP_FIXED;
1201 munmap(vaddr, length);
1202 if (mem_path) {
1203#if defined(__linux__) && !defined(TARGET_S390X)
1204 if (block->fd) {
1205#ifdef MAP_POPULATE
1206 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1207 MAP_PRIVATE;
1208#else
1209 flags |= MAP_PRIVATE;
1210#endif
1211 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1212 flags, block->fd, offset);
1213 } else {
1214 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1215 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1216 flags, -1, 0);
1217 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01001218#else
1219 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001220#endif
1221 } else {
1222#if defined(TARGET_S390X) && defined(CONFIG_KVM)
1223 flags |= MAP_SHARED | MAP_ANONYMOUS;
1224 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
1225 flags, -1, 0);
1226#else
1227 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1228 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1229 flags, -1, 0);
1230#endif
1231 }
1232 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001233 fprintf(stderr, "Could not remap addr: "
1234 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001235 length, addr);
1236 exit(1);
1237 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001238 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001239 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001240 }
1241 return;
1242 }
1243 }
1244}
1245#endif /* !_WIN32 */
1246
pbrookdc828ca2009-04-09 22:21:07 +00001247/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00001248 With the exception of the softmmu code in this file, this should
1249 only be used for local memory (e.g. video ram) that the device owns,
1250 and knows it isn't going to access beyond the end of the block.
1251
1252 It should not be used for general purpose DMA.
1253 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1254 */
Anthony Liguoric227f092009-10-01 16:12:16 -05001255void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00001256{
pbrook94a6b542009-04-11 17:15:54 +00001257 RAMBlock *block;
1258
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001259 /* The list is protected by the iothread lock here. */
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001260 block = ram_list.mru_block;
1261 if (block && addr - block->offset < block->length) {
1262 goto found;
1263 }
Paolo Bonzinia3161032012-11-14 15:54:48 +01001264 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Alex Williamsonf471a172010-06-11 11:11:42 -06001265 if (addr - block->offset < block->length) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001266 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001267 }
pbrook94a6b542009-04-11 17:15:54 +00001268 }
Alex Williamsonf471a172010-06-11 11:11:42 -06001269
1270 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1271 abort();
1272
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001273found:
1274 ram_list.mru_block = block;
1275 if (xen_enabled()) {
1276 /* We need to check whether the requested address is in the RAM
1277 * because we don't want to map the entire guest memory in QEMU;
1278 * in that case, just map up to the end of the requested page.
1279 */
1280 if (block->offset == 0) {
1281 return xen_map_cache(addr, 0, 0);
1282 } else if (block->host == NULL) {
1283 block->host =
1284 xen_map_cache(block->offset, block->length, 1);
1285 }
1286 }
1287 return block->host + (addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00001288}
1289
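/* Example (illustrative sketch, not part of the original code): per the
 * comment above, a device model that owns a RAM-backed MemoryRegion may
 * cache a direct host pointer to it. "MyDisplayState" and its "vram"
 * field are hypothetical names.
 *
 *     static uint8_t *my_display_vram_ptr(MyDisplayState *s, hwaddr offset)
 *     {
 *         ram_addr_t base = memory_region_get_ram_addr(&s->vram);
 *         return qemu_get_ram_ptr(base + offset);
 *     }
 */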
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001290/* Return a host pointer to ram allocated with qemu_ram_alloc. Same as
1291 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
1292 *
1293 * ??? Is this still necessary?
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001294 */
Blue Swirl8b9c99d2012-10-28 11:04:51 +00001295static void *qemu_safe_ram_ptr(ram_addr_t addr)
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001296{
1297 RAMBlock *block;
1298
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001299 /* The list is protected by the iothread lock here. */
Paolo Bonzinia3161032012-11-14 15:54:48 +01001300 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001301 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02001302 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001303 /* We need to check whether the requested address is in the RAM
1304 * because we don't want to map the entire guest memory in QEMU;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001305 * in that case, just map up to the end of the requested page.
Jun Nakajima432d2682010-08-31 16:41:25 +01001306 */
1307 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001308 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01001309 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001310 block->host =
1311 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01001312 }
1313 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001314 return block->host + (addr - block->offset);
1315 }
1316 }
1317
1318 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1319 abort();
1320
1321 return NULL;
1322}
1323
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001324/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1325 * but takes a size argument */
Blue Swirl8b9c99d2012-10-28 11:04:51 +00001326static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001327{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001328 if (*size == 0) {
1329 return NULL;
1330 }
Jan Kiszka868bb332011-06-21 22:59:09 +02001331 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001332 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02001333 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001334 RAMBlock *block;
1335
Paolo Bonzinia3161032012-11-14 15:54:48 +01001336 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001337 if (addr - block->offset < block->length) {
1338 if (addr - block->offset + *size > block->length)
1339 *size = block->length - addr + block->offset;
1340 return block->host + (addr - block->offset);
1341 }
1342 }
1343
1344 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1345 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001346 }
1347}
1348
Marcelo Tosattie8902612010-10-11 15:31:19 -03001349int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00001350{
pbrook94a6b542009-04-11 17:15:54 +00001351 RAMBlock *block;
1352 uint8_t *host = ptr;
1353
Jan Kiszka868bb332011-06-21 22:59:09 +02001354 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001355 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001356 return 0;
1357 }
1358
Paolo Bonzinia3161032012-11-14 15:54:48 +01001359 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001360 /* This case occurs when the block is not mapped. */
1361 if (block->host == NULL) {
1362 continue;
1363 }
Alex Williamsonf471a172010-06-11 11:11:42 -06001364 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03001365 *ram_addr = block->offset + (host - block->host);
1366 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06001367 }
pbrook94a6b542009-04-11 17:15:54 +00001368 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001369
Marcelo Tosattie8902612010-10-11 15:31:19 -03001370 return -1;
1371}
Alex Williamsonf471a172010-06-11 11:11:42 -06001372
Marcelo Tosattie8902612010-10-11 15:31:19 -03001373/* Some of the softmmu routines need to translate from a host pointer
1374 (typically a TLB entry) back to a ram offset. */
1375ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
1376{
1377 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06001378
Marcelo Tosattie8902612010-10-11 15:31:19 -03001379 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
1380 fprintf(stderr, "Bad ram pointer %p\n", ptr);
1381 abort();
1382 }
1383 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00001384}
1385
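/* Example (sketch): the translations above invert qemu_get_ram_ptr() for
 * any host pointer that lands inside a registered RAM block:
 *
 *     void *host = qemu_get_ram_ptr(addr);
 *     assert(qemu_ram_addr_from_host_nofail(host) == addr);
 *
 * A pointer outside every block makes the _nofail variant abort, whereas
 * qemu_ram_addr_from_host() reports failure by returning -1.
 */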
Avi Kivitya8170e52012-10-23 12:30:10 +02001386static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001387 unsigned size)
bellard33417e72003-08-10 21:47:01 +00001388{
pbrook67d3b952006-12-18 05:03:52 +00001389#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00001390 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00001391#endif
Richard Henderson5b450402011-04-18 16:13:12 -07001392#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001393 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00001394#endif
1395 return 0;
1396}
1397
Avi Kivitya8170e52012-10-23 12:30:10 +02001398static void unassigned_mem_write(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001399 uint64_t val, unsigned size)
blueswir1e18231a2008-10-06 18:46:28 +00001400{
1401#ifdef DEBUG_UNASSIGNED
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001402 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
blueswir1e18231a2008-10-06 18:46:28 +00001403#endif
Richard Henderson5b450402011-04-18 16:13:12 -07001404#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001405 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00001406#endif
1407}
1408
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001409static const MemoryRegionOps unassigned_mem_ops = {
1410 .read = unassigned_mem_read,
1411 .write = unassigned_mem_write,
1412 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00001413};
1414
Avi Kivitya8170e52012-10-23 12:30:10 +02001415static uint64_t error_mem_read(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001416 unsigned size)
1417{
1418 abort();
1419}
1420
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001421static const MemoryRegionOps rom_mem_ops = {
1422 .read = error_mem_read,
1423 .write = unassigned_mem_write,
1424 .endianness = DEVICE_NATIVE_ENDIAN,
1425};
1426
Avi Kivitya8170e52012-10-23 12:30:10 +02001427static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001428 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001429{
bellard3a7d9292005-08-21 09:26:42 +00001430 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001431 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00001432 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001433 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001434 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00001435 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001436 switch (size) {
1437 case 1:
1438 stb_p(qemu_get_ram_ptr(ram_addr), val);
1439 break;
1440 case 2:
1441 stw_p(qemu_get_ram_ptr(ram_addr), val);
1442 break;
1443 case 4:
1444 stl_p(qemu_get_ram_ptr(ram_addr), val);
1445 break;
1446 default:
1447 abort();
1448 }
bellardf23db162005-08-21 19:12:28 +00001449 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001450 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00001451 /* we remove the notdirty callback only if the code has been
1452 flushed */
1453 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00001454 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00001455}
1456
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001457static const MemoryRegionOps notdirty_mem_ops = {
1458 .read = error_mem_read,
1459 .write = notdirty_mem_write,
1460 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00001461};
1462
pbrook0f459d12008-06-09 00:20:13 +00001463/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00001464static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00001465{
Andreas Färber9349b4f2012-03-14 01:38:32 +01001466 CPUArchState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00001467 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00001468 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00001469 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00001470 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00001471
aliguori06d55cc2008-11-18 20:24:06 +00001472 if (env->watchpoint_hit) {
1473 /* We re-entered the check after replacing the TB. Now raise
1474 * the debug interrupt so that it will trigger after the
1475 * current instruction. */
Andreas Färberc3affe52013-01-18 15:03:43 +01001476 cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00001477 return;
1478 }
pbrook2e70f6e2008-06-29 01:03:05 +00001479 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00001480 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001481 if ((vaddr == (wp->vaddr & len_mask) ||
1482 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00001483 wp->flags |= BP_WATCHPOINT_HIT;
1484 if (!env->watchpoint_hit) {
1485 env->watchpoint_hit = wp;
Blue Swirl5a316522012-12-02 21:28:09 +00001486 tb_check_watchpoint(env);
aliguori6e140f22008-11-18 20:37:55 +00001487 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1488 env->exception_index = EXCP_DEBUG;
Max Filippov488d6572012-01-29 02:24:39 +04001489 cpu_loop_exit(env);
aliguori6e140f22008-11-18 20:37:55 +00001490 } else {
1491 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1492 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
Max Filippov488d6572012-01-29 02:24:39 +04001493 cpu_resume_from_signal(env, NULL);
aliguori6e140f22008-11-18 20:37:55 +00001494 }
aliguori06d55cc2008-11-18 20:24:06 +00001495 }
aliguori6e140f22008-11-18 20:37:55 +00001496 } else {
1497 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00001498 }
1499 }
1500}
1501
pbrook6658ffb2007-03-16 23:58:11 +00001502/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1503 so these check for a hit then pass through to the normal out-of-line
1504 phys routines. */
Avi Kivitya8170e52012-10-23 12:30:10 +02001505static uint64_t watch_mem_read(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001506 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001507{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001508 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1509 switch (size) {
1510 case 1: return ldub_phys(addr);
1511 case 2: return lduw_phys(addr);
1512 case 4: return ldl_phys(addr);
1513 default: abort();
1514 }
pbrook6658ffb2007-03-16 23:58:11 +00001515}
1516
Avi Kivitya8170e52012-10-23 12:30:10 +02001517static void watch_mem_write(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001518 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001519{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001520 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1521 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04001522 case 1:
1523 stb_phys(addr, val);
1524 break;
1525 case 2:
1526 stw_phys(addr, val);
1527 break;
1528 case 4:
1529 stl_phys(addr, val);
1530 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02001531 default: abort();
1532 }
pbrook6658ffb2007-03-16 23:58:11 +00001533}
1534
Avi Kivity1ec9b902012-01-02 12:47:48 +02001535static const MemoryRegionOps watch_mem_ops = {
1536 .read = watch_mem_read,
1537 .write = watch_mem_write,
1538 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00001539};
pbrook6658ffb2007-03-16 23:58:11 +00001540
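/* Example (sketch): these handlers serve watchpoints armed through the
 * public API, e.g. by the gdbstub. Arming a write watchpoint on a 4-byte
 * range looks like:
 *
 *     CPUWatchpoint *wp;
 *     cpu_watchpoint_insert(env, vaddr, 4, BP_MEM_WRITE | BP_GDB, &wp);
 *
 * The TLB then steers accesses to that page through io_mem_watch so that
 * check_watchpoint() runs before the real access is performed.
 */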
Avi Kivitya8170e52012-10-23 12:30:10 +02001541static uint64_t subpage_read(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001542 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001543{
Avi Kivity70c68e42012-01-02 12:32:48 +02001544 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07001545 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02001546 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00001547#if defined(DEBUG_SUBPAGE)
1548 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
1549 mmio, len, addr, idx);
1550#endif
blueswir1db7b5422007-05-26 17:36:03 +00001551
Avi Kivity5312bd82012-02-12 18:32:55 +02001552 section = &phys_sections[mmio->sub_section[idx]];
1553 addr += mmio->base;
1554 addr -= section->offset_within_address_space;
1555 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02001556 return io_mem_read(section->mr, addr, len);
blueswir1db7b5422007-05-26 17:36:03 +00001557}
1558
Avi Kivitya8170e52012-10-23 12:30:10 +02001559static void subpage_write(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001560 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001561{
Avi Kivity70c68e42012-01-02 12:32:48 +02001562 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07001563 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02001564 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00001565#if defined(DEBUG_SUBPAGE)
Avi Kivity70c68e42012-01-02 12:32:48 +02001566 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
1567 " idx %d value %"PRIx64"\n",
Richard Hendersonf6405242010-04-22 16:47:31 -07001568 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00001569#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07001570
Avi Kivity5312bd82012-02-12 18:32:55 +02001571 section = &phys_sections[mmio->sub_section[idx]];
1572 addr += mmio->base;
1573 addr -= section->offset_within_address_space;
1574 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02001575 io_mem_write(section->mr, addr, value, len);
blueswir1db7b5422007-05-26 17:36:03 +00001576}
1577
Avi Kivity70c68e42012-01-02 12:32:48 +02001578static const MemoryRegionOps subpage_ops = {
1579 .read = subpage_read,
1580 .write = subpage_write,
1581 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00001582};
1583
Avi Kivitya8170e52012-10-23 12:30:10 +02001584static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
Avi Kivityde712f92012-01-02 12:41:07 +02001585 unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01001586{
1587 ram_addr_t raddr = addr;
1588 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02001589 switch (size) {
1590 case 1: return ldub_p(ptr);
1591 case 2: return lduw_p(ptr);
1592 case 4: return ldl_p(ptr);
1593 default: abort();
1594 }
Andreas Färber56384e82011-11-30 16:26:21 +01001595}
1596
Avi Kivitya8170e52012-10-23 12:30:10 +02001597static void subpage_ram_write(void *opaque, hwaddr addr,
Avi Kivityde712f92012-01-02 12:41:07 +02001598 uint64_t value, unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01001599{
1600 ram_addr_t raddr = addr;
1601 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02001602 switch (size) {
1603 case 1: return stb_p(ptr, value);
1604 case 2: return stw_p(ptr, value);
1605 case 4: return stl_p(ptr, value);
1606 default: abort();
1607 }
Andreas Färber56384e82011-11-30 16:26:21 +01001608}
1609
Avi Kivityde712f92012-01-02 12:41:07 +02001610static const MemoryRegionOps subpage_ram_ops = {
1611 .read = subpage_ram_read,
1612 .write = subpage_ram_write,
1613 .endianness = DEVICE_NATIVE_ENDIAN,
Andreas Färber56384e82011-11-30 16:26:21 +01001614};
1615
Anthony Liguoric227f092009-10-01 16:12:16 -05001616static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02001617 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00001618{
1619 int idx, eidx;
1620
1621 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1622 return -1;
1623 idx = SUBPAGE_IDX(start);
1624 eidx = SUBPAGE_IDX(end);
1625#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00001626 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", __func__,
blueswir1db7b5422007-05-26 17:36:03 +00001627 mmio, start, end, idx, eidx, section);
1628#endif
Avi Kivity5312bd82012-02-12 18:32:55 +02001629 if (memory_region_is_ram(phys_sections[section].mr)) {
1630 MemoryRegionSection new_section = phys_sections[section];
1631 new_section.mr = &io_mem_subpage_ram;
1632 section = phys_section_add(&new_section);
Andreas Färber56384e82011-11-30 16:26:21 +01001633 }
blueswir1db7b5422007-05-26 17:36:03 +00001634 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02001635 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00001636 }
1637
1638 return 0;
1639}
1640
Avi Kivitya8170e52012-10-23 12:30:10 +02001641static subpage_t *subpage_init(hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00001642{
Anthony Liguoric227f092009-10-01 16:12:16 -05001643 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00001644
Anthony Liguori7267c092011-08-20 22:09:37 -05001645 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00001646
1647 mmio->base = base;
Avi Kivity70c68e42012-01-02 12:32:48 +02001648 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
1649 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02001650 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00001651#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00001652 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1653 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00001654#endif
Avi Kivity0f0cb162012-02-13 17:14:32 +02001655 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
blueswir1db7b5422007-05-26 17:36:03 +00001656
1657 return mmio;
1658}
1659
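/* Example (illustrative): subpages are created automatically whenever
 * regions smaller than TARGET_PAGE_SIZE share a guest page. Hypothetical
 * board code like the following would route dispatch through
 * subpage_init() and subpage_register():
 *
 *     memory_region_init_io(&dev_a, &dev_a_ops, s, "dev-a", 0x80);
 *     memory_region_init_io(&dev_b, &dev_b_ops, s, "dev-b", 0x80);
 *     memory_region_add_subregion(get_system_memory(), 0x10000000, &dev_a);
 *     memory_region_add_subregion(get_system_memory(), 0x10000080, &dev_b);
 */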
Avi Kivity5312bd82012-02-12 18:32:55 +02001660static uint16_t dummy_section(MemoryRegion *mr)
1661{
1662 MemoryRegionSection section = {
1663 .mr = mr,
1664 .offset_within_address_space = 0,
1665 .offset_within_region = 0,
1666 .size = UINT64_MAX,
1667 };
1668
1669 return phys_section_add(&section);
1670}
1671
Avi Kivitya8170e52012-10-23 12:30:10 +02001672MemoryRegion *iotlb_to_region(hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02001673{
Avi Kivity37ec01d2012-03-08 18:08:35 +02001674 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02001675}
1676
Avi Kivitye9179ce2009-06-14 11:38:52 +03001677static void io_mem_init(void)
1678{
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001679 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
1680 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
1681 "unassigned", UINT64_MAX);
1682 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
1683 "notdirty", UINT64_MAX);
Avi Kivityde712f92012-01-02 12:41:07 +02001684 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
1685 "subpage-ram", UINT64_MAX);
Avi Kivity1ec9b902012-01-02 12:47:48 +02001686 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
1687 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03001688}
1689
Avi Kivityac1970f2012-10-03 16:22:53 +02001690static void mem_begin(MemoryListener *listener)
1691{
1692 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
1693
1694 destroy_all_mappings(d);
1695 d->phys_map.ptr = PHYS_MAP_NODE_NIL;
1696}
1697
Avi Kivity50c1e142012-02-08 21:36:02 +02001698static void core_begin(MemoryListener *listener)
1699{
Avi Kivity5312bd82012-02-12 18:32:55 +02001700 phys_sections_clear();
1701 phys_section_unassigned = dummy_section(&io_mem_unassigned);
Avi Kivityaa102232012-03-08 17:06:55 +02001702 phys_section_notdirty = dummy_section(&io_mem_notdirty);
1703 phys_section_rom = dummy_section(&io_mem_rom);
1704 phys_section_watch = dummy_section(&io_mem_watch);
Avi Kivity50c1e142012-02-08 21:36:02 +02001705}
1706
Avi Kivity1d711482012-10-02 18:54:45 +02001707static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02001708{
Andreas Färber9349b4f2012-03-14 01:38:32 +01001709 CPUArchState *env;
Avi Kivity117712c2012-02-12 21:23:17 +02001710
1711 /* since each CPU stores ram addresses in its TLB cache, we must
1712 reset the modified entries */
1713 /* XXX: slow ! */
1714 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1715 tlb_flush(env, 1);
1716 }
Avi Kivity50c1e142012-02-08 21:36:02 +02001717}
1718
Avi Kivity93632742012-02-08 16:54:16 +02001719static void core_log_global_start(MemoryListener *listener)
1720{
1721 cpu_physical_memory_set_dirty_tracking(1);
1722}
1723
1724static void core_log_global_stop(MemoryListener *listener)
1725{
1726 cpu_physical_memory_set_dirty_tracking(0);
1727}
1728
Avi Kivity4855d412012-02-08 21:16:05 +02001729static void io_region_add(MemoryListener *listener,
1730 MemoryRegionSection *section)
1731{
Avi Kivitya2d33522012-03-05 17:40:12 +02001732 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
1733
1734 mrio->mr = section->mr;
1735 mrio->offset = section->offset_within_region;
1736 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
Avi Kivity4855d412012-02-08 21:16:05 +02001737 section->offset_within_address_space, section->size);
Avi Kivitya2d33522012-03-05 17:40:12 +02001738 ioport_register(&mrio->iorange);
Avi Kivity4855d412012-02-08 21:16:05 +02001739}
1740
1741static void io_region_del(MemoryListener *listener,
1742 MemoryRegionSection *section)
1743{
1744 isa_unassign_ioport(section->offset_within_address_space, section->size);
1745}
1746
Avi Kivity93632742012-02-08 16:54:16 +02001747static MemoryListener core_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02001748 .begin = core_begin,
Avi Kivity93632742012-02-08 16:54:16 +02001749 .log_global_start = core_log_global_start,
1750 .log_global_stop = core_log_global_stop,
Avi Kivityac1970f2012-10-03 16:22:53 +02001751 .priority = 1,
Avi Kivity93632742012-02-08 16:54:16 +02001752};
1753
Avi Kivity4855d412012-02-08 21:16:05 +02001754static MemoryListener io_memory_listener = {
1755 .region_add = io_region_add,
1756 .region_del = io_region_del,
Avi Kivity4855d412012-02-08 21:16:05 +02001757 .priority = 0,
1758};
1759
Avi Kivity1d711482012-10-02 18:54:45 +02001760static MemoryListener tcg_memory_listener = {
1761 .commit = tcg_commit,
1762};
1763
Avi Kivityac1970f2012-10-03 16:22:53 +02001764void address_space_init_dispatch(AddressSpace *as)
1765{
1766 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1767
1768 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1769 d->listener = (MemoryListener) {
1770 .begin = mem_begin,
1771 .region_add = mem_add,
1772 .region_nop = mem_add,
1773 .priority = 0,
1774 };
1775 as->dispatch = d;
1776 memory_listener_register(&d->listener, as);
1777}
1778
Avi Kivity83f3c252012-10-07 12:59:55 +02001779void address_space_destroy_dispatch(AddressSpace *as)
1780{
1781 AddressSpaceDispatch *d = as->dispatch;
1782
1783 memory_listener_unregister(&d->listener);
1784 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
1785 g_free(d);
1786 as->dispatch = NULL;
1787}
1788
Avi Kivity62152b82011-07-26 14:26:14 +03001789static void memory_map_init(void)
1790{
Anthony Liguori7267c092011-08-20 22:09:37 -05001791 system_memory = g_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03001792 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity2673a5d2012-10-02 18:49:28 +02001793 address_space_init(&address_space_memory, system_memory);
1794 address_space_memory.name = "memory";
Avi Kivity309cb472011-08-08 16:09:03 +03001795
Anthony Liguori7267c092011-08-20 22:09:37 -05001796 system_io = g_malloc(sizeof(*system_io));
Avi Kivity309cb472011-08-08 16:09:03 +03001797 memory_region_init(system_io, "io", 65536);
Avi Kivity2673a5d2012-10-02 18:49:28 +02001798 address_space_init(&address_space_io, system_io);
1799 address_space_io.name = "I/O";
Avi Kivity93632742012-02-08 16:54:16 +02001800
Avi Kivityf6790af2012-10-02 20:13:51 +02001801 memory_listener_register(&core_memory_listener, &address_space_memory);
1802 memory_listener_register(&io_memory_listener, &address_space_io);
1803 memory_listener_register(&tcg_memory_listener, &address_space_memory);
Peter Maydell9e119082012-10-29 11:34:32 +10001804
1805 dma_context_init(&dma_context_memory, &address_space_memory,
1806 NULL, NULL, NULL);
Avi Kivity62152b82011-07-26 14:26:14 +03001807}
1808
1809MemoryRegion *get_system_memory(void)
1810{
1811 return system_memory;
1812}
1813
Avi Kivity309cb472011-08-08 16:09:03 +03001814MemoryRegion *get_system_io(void)
1815{
1816 return system_io;
1817}
1818
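/* Example (sketch of typical board code layered on the globals above,
 * using this tree's pre-"owner" memory API; "myboard" is hypothetical):
 *
 *     static MemoryRegion ram;
 *
 *     memory_region_init_ram(&ram, "myboard.ram", ram_size);
 *     vmstate_register_ram_global(&ram);
 *     memory_region_add_subregion(get_system_memory(), 0, &ram);
 */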
pbrooke2eef172008-06-08 01:09:01 +00001819#endif /* !defined(CONFIG_USER_ONLY) */
1820
bellard13eb76e2004-01-24 15:23:36 +00001821/* physical memory access (slow version, mainly for debug) */
1822#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01001823int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00001824 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00001825{
1826 int l, flags;
1827 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00001828 void * p;
bellard13eb76e2004-01-24 15:23:36 +00001829
1830 while (len > 0) {
1831 page = addr & TARGET_PAGE_MASK;
1832 l = (page + TARGET_PAGE_SIZE) - addr;
1833 if (l > len)
1834 l = len;
1835 flags = page_get_flags(page);
1836 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00001837 return -1;
bellard13eb76e2004-01-24 15:23:36 +00001838 if (is_write) {
1839 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00001840 return -1;
bellard579a97f2007-11-11 14:26:47 +00001841 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001842 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00001843 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001844 memcpy(p, buf, l);
1845 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00001846 } else {
1847 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00001848 return -1;
bellard579a97f2007-11-11 14:26:47 +00001849 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001850 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00001851 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001852 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00001853 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00001854 }
1855 len -= l;
1856 buf += l;
1857 addr += l;
1858 }
Paul Brooka68fe892010-03-01 00:08:59 +00001859 return 0;
bellard13eb76e2004-01-24 15:23:36 +00001860}
bellard8df1cd02005-01-28 22:37:22 +00001861
bellard13eb76e2004-01-24 15:23:36 +00001862#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001863
Avi Kivitya8170e52012-10-23 12:30:10 +02001864static void invalidate_and_set_dirty(hwaddr addr,
1865 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001866{
1867 if (!cpu_physical_memory_is_dirty(addr)) {
1868 /* invalidate code */
1869 tb_invalidate_phys_page_range(addr, addr + length, 0);
1870 /* set dirty bit */
1871 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1872 }
Anthony PERARDe2269392012-10-03 13:49:22 +00001873 xen_modified_memory(addr, length);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001874}
1875
Avi Kivitya8170e52012-10-23 12:30:10 +02001876void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02001877 int len, bool is_write)
bellard13eb76e2004-01-24 15:23:36 +00001878{
Avi Kivityac1970f2012-10-03 16:22:53 +02001879 AddressSpaceDispatch *d = as->dispatch;
Avi Kivity37ec01d2012-03-08 18:08:35 +02001880 int l;
bellard13eb76e2004-01-24 15:23:36 +00001881 uint8_t *ptr;
1882 uint32_t val;
Avi Kivitya8170e52012-10-23 12:30:10 +02001883 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02001884 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00001885
bellard13eb76e2004-01-24 15:23:36 +00001886 while (len > 0) {
1887 page = addr & TARGET_PAGE_MASK;
1888 l = (page + TARGET_PAGE_SIZE) - addr;
1889 if (l > len)
1890 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02001891 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00001892
bellard13eb76e2004-01-24 15:23:36 +00001893 if (is_write) {
Avi Kivityf3705d52012-03-08 16:16:34 +02001894 if (!memory_region_is_ram(section->mr)) {
Avi Kivitya8170e52012-10-23 12:30:10 +02001895 hwaddr addr1;
Blue Swirlcc5bea62012-04-14 14:56:48 +00001896 addr1 = memory_region_section_addr(section, addr);
bellard6a00d602005-11-21 23:25:50 +00001897 /* XXX: could force cpu_single_env to NULL to avoid
1898 potential bugs */
aurel326c2934d2009-02-18 21:37:17 +00001899 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00001900 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001901 val = ldl_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02001902 io_mem_write(section->mr, addr1, val, 4);
bellard13eb76e2004-01-24 15:23:36 +00001903 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00001904 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00001905 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001906 val = lduw_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02001907 io_mem_write(section->mr, addr1, val, 2);
bellard13eb76e2004-01-24 15:23:36 +00001908 l = 2;
1909 } else {
bellard1c213d12005-09-03 10:49:04 +00001910 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001911 val = ldub_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02001912 io_mem_write(section->mr, addr1, val, 1);
bellard13eb76e2004-01-24 15:23:36 +00001913 l = 1;
1914 }
Avi Kivityf3705d52012-03-08 16:16:34 +02001915 } else if (!section->readonly) {
Anthony PERARD8ca56922011-07-15 04:32:53 +00001916 ram_addr_t addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02001917 addr1 = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00001918 + memory_region_section_addr(section, addr);
bellard13eb76e2004-01-24 15:23:36 +00001919 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00001920 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00001921 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001922 invalidate_and_set_dirty(addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00001923 }
1924 } else {
Blue Swirlcc5bea62012-04-14 14:56:48 +00001925 if (!(memory_region_is_ram(section->mr) ||
1926 memory_region_is_romd(section->mr))) {
Avi Kivitya8170e52012-10-23 12:30:10 +02001927 hwaddr addr1;
bellard13eb76e2004-01-24 15:23:36 +00001928 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00001929 addr1 = memory_region_section_addr(section, addr);
aurel326c2934d2009-02-18 21:37:17 +00001930 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00001931 /* 32 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02001932 val = io_mem_read(section->mr, addr1, 4);
bellardc27004e2005-01-03 23:35:10 +00001933 stl_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00001934 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00001935 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00001936 /* 16 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02001937 val = io_mem_read(section->mr, addr1, 2);
bellardc27004e2005-01-03 23:35:10 +00001938 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00001939 l = 2;
1940 } else {
bellard1c213d12005-09-03 10:49:04 +00001941 /* 8 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02001942 val = io_mem_read(section->mr, addr1, 1);
bellardc27004e2005-01-03 23:35:10 +00001943 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00001944 l = 1;
1945 }
1946 } else {
1947 /* RAM case */
Anthony PERARD0a1b3572012-03-19 15:54:34 +00001948 ptr = qemu_get_ram_ptr(section->mr->ram_addr
Blue Swirlcc5bea62012-04-14 14:56:48 +00001949 + memory_region_section_addr(section,
1950 addr));
Avi Kivityf3705d52012-03-08 16:16:34 +02001951 memcpy(buf, ptr, l);
bellard13eb76e2004-01-24 15:23:36 +00001952 }
1953 }
1954 len -= l;
1955 buf += l;
1956 addr += l;
1957 }
1958}
bellard8df1cd02005-01-28 22:37:22 +00001959
Avi Kivitya8170e52012-10-23 12:30:10 +02001960void address_space_write(AddressSpace *as, hwaddr addr,
Avi Kivityac1970f2012-10-03 16:22:53 +02001961 const uint8_t *buf, int len)
1962{
1963 address_space_rw(as, addr, (uint8_t *)buf, len, true);
1964}
1965
1966/**
1967 * address_space_read: read from an address space.
1968 *
1969 * @as: #AddressSpace to be accessed
1970 * @addr: address within that address space
1971 * @buf: buffer with the data transferred
 * @len: length of the transfer in bytes
1972 */
Avi Kivitya8170e52012-10-23 12:30:10 +02001973void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02001974{
1975 address_space_rw(as, addr, buf, len, false);
1976}
1977
1978
Avi Kivitya8170e52012-10-23 12:30:10 +02001979void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02001980 int len, int is_write)
1981{
1982 return address_space_rw(&address_space_memory, addr, buf, len, is_write);
1983}
1984
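/* Example (sketch): a DMA-capable device model updating a descriptor in
 * guest memory via the wrapper above; "MyDesc", "DESC_DONE" and
 * "desc_paddr" are hypothetical.
 *
 *     MyDesc d;
 *
 *     cpu_physical_memory_read(desc_paddr, &d, sizeof(d));
 *     d.status |= DESC_DONE;
 *     cpu_physical_memory_write(desc_paddr, &d, sizeof(d));
 */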
bellardd0ecd2a2006-04-23 17:14:48 +00001985/* used for ROM loading: can write in RAM and ROM */
Avi Kivitya8170e52012-10-23 12:30:10 +02001986void cpu_physical_memory_write_rom(hwaddr addr,
bellardd0ecd2a2006-04-23 17:14:48 +00001987 const uint8_t *buf, int len)
1988{
Avi Kivityac1970f2012-10-03 16:22:53 +02001989 AddressSpaceDispatch *d = address_space_memory.dispatch;
bellardd0ecd2a2006-04-23 17:14:48 +00001990 int l;
1991 uint8_t *ptr;
Avi Kivitya8170e52012-10-23 12:30:10 +02001992 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02001993 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00001994
bellardd0ecd2a2006-04-23 17:14:48 +00001995 while (len > 0) {
1996 page = addr & TARGET_PAGE_MASK;
1997 l = (page + TARGET_PAGE_SIZE) - addr;
1998 if (l > len)
1999 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02002000 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002001
Blue Swirlcc5bea62012-04-14 14:56:48 +00002002 if (!(memory_region_is_ram(section->mr) ||
2003 memory_region_is_romd(section->mr))) {
bellardd0ecd2a2006-04-23 17:14:48 +00002004 /* do nothing */
2005 } else {
2006 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02002007 addr1 = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002008 + memory_region_section_addr(section, addr);
bellardd0ecd2a2006-04-23 17:14:48 +00002009 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002010 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002011 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002012 invalidate_and_set_dirty(addr1, l);
bellardd0ecd2a2006-04-23 17:14:48 +00002013 }
2014 len -= l;
2015 buf += l;
2016 addr += l;
2017 }
2018}
2019
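/* Example (sketch): firmware loaders use the ROM variant above so the
 * bytes land even in regions the guest sees as read-only
 * (bios_base/bios_blob/bios_size are hypothetical):
 *
 *     cpu_physical_memory_write_rom(bios_base, bios_blob, bios_size);
 */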
aliguori6d16c2f2009-01-22 16:59:11 +00002020typedef struct {
2021 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002022 hwaddr addr;
2023 hwaddr len;
aliguori6d16c2f2009-01-22 16:59:11 +00002024} BounceBuffer;
2025
2026static BounceBuffer bounce;
2027
aliguoriba223c22009-01-22 16:59:16 +00002028typedef struct MapClient {
2029 void *opaque;
2030 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00002031 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002032} MapClient;
2033
Blue Swirl72cf2d42009-09-12 07:36:22 +00002034static QLIST_HEAD(map_client_list, MapClient) map_client_list
2035 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002036
2037void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2038{
Anthony Liguori7267c092011-08-20 22:09:37 -05002039 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00002040
2041 client->opaque = opaque;
2042 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002043 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00002044 return client;
2045}
2046
Blue Swirl8b9c99d2012-10-28 11:04:51 +00002047static void cpu_unregister_map_client(void *_client)
aliguoriba223c22009-01-22 16:59:16 +00002048{
2049 MapClient *client = (MapClient *)_client;
2050
Blue Swirl72cf2d42009-09-12 07:36:22 +00002051 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002052 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002053}
2054
2055static void cpu_notify_map_clients(void)
2056{
2057 MapClient *client;
2058
Blue Swirl72cf2d42009-09-12 07:36:22 +00002059 while (!QLIST_EMPTY(&map_client_list)) {
2060 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002061 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09002062 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00002063 }
2064}
2065
aliguori6d16c2f2009-01-22 16:59:11 +00002066/* Map a physical memory region into a host virtual address.
2067 * May map a subset of the requested range, given by and returned in *plen.
2068 * May return NULL if resources needed to perform the mapping are exhausted.
2069 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002070 * Use cpu_register_map_client() to know when retrying the map operation is
2071 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002072 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002073void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002074 hwaddr addr,
2075 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002076 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002077{
Avi Kivityac1970f2012-10-03 16:22:53 +02002078 AddressSpaceDispatch *d = as->dispatch;
Avi Kivitya8170e52012-10-23 12:30:10 +02002079 hwaddr len = *plen;
2080 hwaddr todo = 0;
aliguori6d16c2f2009-01-22 16:59:11 +00002081 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02002082 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02002083 MemoryRegionSection *section;
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002084 ram_addr_t raddr = RAM_ADDR_MAX;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002085 ram_addr_t rlen;
2086 void *ret;
aliguori6d16c2f2009-01-22 16:59:11 +00002087
2088 while (len > 0) {
2089 page = addr & TARGET_PAGE_MASK;
2090 l = (page + TARGET_PAGE_SIZE) - addr;
2091 if (l > len)
2092 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02002093 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
aliguori6d16c2f2009-01-22 16:59:11 +00002094
Avi Kivityf3705d52012-03-08 16:16:34 +02002095 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002096 if (todo || bounce.buffer) {
aliguori6d16c2f2009-01-22 16:59:11 +00002097 break;
2098 }
2099 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2100 bounce.addr = addr;
2101 bounce.len = l;
2102 if (!is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002103 address_space_read(as, addr, bounce.buffer, l);
aliguori6d16c2f2009-01-22 16:59:11 +00002104 }
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002105
2106 *plen = l;
2107 return bounce.buffer;
aliguori6d16c2f2009-01-22 16:59:11 +00002108 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002109 if (!todo) {
Avi Kivityf3705d52012-03-08 16:16:34 +02002110 raddr = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002111 + memory_region_section_addr(section, addr);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002112 }
aliguori6d16c2f2009-01-22 16:59:11 +00002113
2114 len -= l;
2115 addr += l;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002116 todo += l;
aliguori6d16c2f2009-01-22 16:59:11 +00002117 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002118 rlen = todo;
2119 ret = qemu_ram_ptr_length(raddr, &rlen);
2120 *plen = rlen;
2121 return ret;
aliguori6d16c2f2009-01-22 16:59:11 +00002122}
2123
Avi Kivityac1970f2012-10-03 16:22:53 +02002124/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002125 * Will also mark the memory as dirty if is_write == 1. access_len gives
2126 * the amount of memory that was actually read or written by the caller.
2127 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002128void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2129 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002130{
2131 if (buffer != bounce.buffer) {
2132 if (is_write) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03002133 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002134 while (access_len) {
2135 unsigned l;
2136 l = TARGET_PAGE_SIZE;
2137 if (l > access_len)
2138 l = access_len;
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002139 invalidate_and_set_dirty(addr1, l);
aliguori6d16c2f2009-01-22 16:59:11 +00002140 addr1 += l;
2141 access_len -= l;
2142 }
2143 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002144 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002145 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002146 }
aliguori6d16c2f2009-01-22 16:59:11 +00002147 return;
2148 }
2149 if (is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002150 address_space_write(as, bounce.addr, bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002151 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002152 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002153 bounce.buffer = NULL;
aliguoriba223c22009-01-22 16:59:16 +00002154 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002155}
bellardd0ecd2a2006-04-23 17:14:48 +00002156
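/* Example (sketch): zero-copy DMA with the bounce-buffer fallback handled.
 * When the target is not RAM the single global bounce buffer is handed
 * out, and a NULL return means it is busy; a map client can retry later.
 * "s" and "my_retry_cb" are hypothetical.
 *
 *     hwaddr len = size;
 *     void *p = address_space_map(&address_space_memory, paddr, &len, true);
 *     if (!p) {
 *         cpu_register_map_client(s, my_retry_cb);
 *         return;
 *     }
 *     memcpy(p, data, len);
 *     address_space_unmap(&address_space_memory, p, len, true, len);
 */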
Avi Kivitya8170e52012-10-23 12:30:10 +02002157void *cpu_physical_memory_map(hwaddr addr,
2158 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002159 int is_write)
2160{
2161 return address_space_map(&address_space_memory, addr, plen, is_write);
2162}
2163
Avi Kivitya8170e52012-10-23 12:30:10 +02002164void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2165 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002166{
2167 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2168}
2169
bellard8df1cd02005-01-28 22:37:22 +00002170/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002171static inline uint32_t ldl_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002172 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002173{
bellard8df1cd02005-01-28 22:37:22 +00002174 uint8_t *ptr;
2175 uint32_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02002176 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00002177
Avi Kivityac1970f2012-10-03 16:22:53 +02002178 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002179
Blue Swirlcc5bea62012-04-14 14:56:48 +00002180 if (!(memory_region_is_ram(section->mr) ||
2181 memory_region_is_romd(section->mr))) {
bellard8df1cd02005-01-28 22:37:22 +00002182 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00002183 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02002184 val = io_mem_read(section->mr, addr, 4);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002185#if defined(TARGET_WORDS_BIGENDIAN)
2186 if (endian == DEVICE_LITTLE_ENDIAN) {
2187 val = bswap32(val);
2188 }
2189#else
2190 if (endian == DEVICE_BIG_ENDIAN) {
2191 val = bswap32(val);
2192 }
2193#endif
bellard8df1cd02005-01-28 22:37:22 +00002194 } else {
2195 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02002196 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002197 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002198 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002199 switch (endian) {
2200 case DEVICE_LITTLE_ENDIAN:
2201 val = ldl_le_p(ptr);
2202 break;
2203 case DEVICE_BIG_ENDIAN:
2204 val = ldl_be_p(ptr);
2205 break;
2206 default:
2207 val = ldl_p(ptr);
2208 break;
2209 }
bellard8df1cd02005-01-28 22:37:22 +00002210 }
2211 return val;
2212}
2213
Avi Kivitya8170e52012-10-23 12:30:10 +02002214uint32_t ldl_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002215{
2216 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2217}
2218
Avi Kivitya8170e52012-10-23 12:30:10 +02002219uint32_t ldl_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002220{
2221 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2222}
2223
Avi Kivitya8170e52012-10-23 12:30:10 +02002224uint32_t ldl_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002225{
2226 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2227}
2228
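/* Example (sketch): the _le/_be variants read with a fixed device
 * endianness regardless of the target's, e.g. for a little-endian PCI
 * device register (bar_base and REG_STATUS are hypothetical):
 *
 *     uint32_t status = ldl_le_phys(bar_base + REG_STATUS);
 */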
bellard84b7b8e2005-11-28 21:19:04 +00002229/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002230static inline uint64_t ldq_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002231 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00002232{
bellard84b7b8e2005-11-28 21:19:04 +00002233 uint8_t *ptr;
2234 uint64_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02002235 MemoryRegionSection *section;
bellard84b7b8e2005-11-28 21:19:04 +00002236
Avi Kivityac1970f2012-10-03 16:22:53 +02002237 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002238
Blue Swirlcc5bea62012-04-14 14:56:48 +00002239 if (!(memory_region_is_ram(section->mr) ||
2240 memory_region_is_romd(section->mr))) {
bellard84b7b8e2005-11-28 21:19:04 +00002241 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00002242 addr = memory_region_section_addr(section, addr);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002243
2244 /* XXX This is broken when device endian != cpu endian.
2245 Fix and add "endian" variable check */
bellard84b7b8e2005-11-28 21:19:04 +00002246#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivity37ec01d2012-03-08 18:08:35 +02002247 val = io_mem_read(section->mr, addr, 4) << 32;
2248 val |= io_mem_read(section->mr, addr + 4, 4);
bellard84b7b8e2005-11-28 21:19:04 +00002249#else
Avi Kivity37ec01d2012-03-08 18:08:35 +02002250 val = io_mem_read(section->mr, addr, 4);
2251 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
bellard84b7b8e2005-11-28 21:19:04 +00002252#endif
2253 } else {
2254 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02002255 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002256 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002257 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002258 switch (endian) {
2259 case DEVICE_LITTLE_ENDIAN:
2260 val = ldq_le_p(ptr);
2261 break;
2262 case DEVICE_BIG_ENDIAN:
2263 val = ldq_be_p(ptr);
2264 break;
2265 default:
2266 val = ldq_p(ptr);
2267 break;
2268 }
bellard84b7b8e2005-11-28 21:19:04 +00002269 }
2270 return val;
2271}
2272
Avi Kivitya8170e52012-10-23 12:30:10 +02002273uint64_t ldq_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002274{
2275 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2276}
2277
Avi Kivitya8170e52012-10-23 12:30:10 +02002278uint64_t ldq_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002279{
2280 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2281}
2282
Avi Kivitya8170e52012-10-23 12:30:10 +02002283uint64_t ldq_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002284{
2285 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2286}

/* XXX: optimize */
uint32_t ldub_phys(hwaddr addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}
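
/*
 * Note on the conditional swaps in the I/O path above: io_mem_read()
 * and io_mem_write() hand data back and forth in the target's native
 * byte order, so a bswap is only needed when the caller asked for the
 * opposite ordering.  On a big-endian target, for example, only the
 * DEVICE_LITTLE_ENDIAN variants take the bswap16()/bswap32() hit.
 */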

uint32_t lduw_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned. The RAM page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
                               & TARGET_PAGE_MASK)
                              + memory_region_section_addr(section, addr);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
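
/*
 * Usage sketch: softmmu page-table walkers are the intended callers.
 * A walker in the style of target-i386's MMU fault handler that sets
 * the accessed bit in a guest PTE writes it back with the _notdirty
 * variant, so that the guest page is not flagged as modified merely
 * because QEMU touched the PTE (PG_ACCESSED_MASK stands in for the
 * target's own flag):
 *
 *     pte = ldl_phys(pte_addr);
 *     if (!(pte & PG_ACCESSED_MASK)) {
 *         stl_phys_notdirty(pte_addr, pte | PG_ACCESSED_MASK);
 *     }
 */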

/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
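
/*
 * Usage sketch (made-up names): a device model posting a completion
 * status into guest RAM with a fixed little-endian layout:
 *
 *     stl_le_phys(status_pa, STATUS_OK);
 *
 * Unlike stl_phys_notdirty() above, this path ends in
 * invalidate_and_set_dirty(), so migration sees the page as dirty and
 * any translated code overlapping it is invalidated.
 */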

/* XXX: optimize */
void stb_phys(hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page is mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        } else {
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
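
/*
 * Usage sketch: this is the accessor behind the gdbstub's memory reads
 * and writes.  A debugger-style peek at a guest virtual address looks
 * like:
 *
 *     uint32_t word;
 *     if (cpu_memory_rw_debug(env, vaddr, (uint8_t *)&word,
 *                             sizeof(word), 0) < 0) {
 *         // no physical page mapped at vaddr
 *     }
 */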
#endif

#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             phys_addr >> TARGET_PAGE_BITS);

    return !(memory_region_is_ram(section->mr) ||
             memory_region_is_romd(section->mr));
}
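
/*
 * Usage sketch (hypothetical caller): code that walks guest-physical
 * memory, such as the guest-memory dump support, can use this
 * predicate to skip device-backed pages, where a read would go through
 * a device model and may have side effects:
 *
 *     if (cpu_physical_memory_is_io(paddr)) {
 *         continue;   // leave MMIO regions out of the dump
 *     }
 */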
#endif