/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_UNASSIGNED
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;
DMAContext dma_context_memory;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *, cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

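/* PhysPageEntry (declared outside this file) appears to pack a 1-bit
 * is_leaf flag with a 15-bit ptr field, so the nil marker below is the
 * largest 15-bit value, 0x7fff. */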
#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

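/* Recursively populate the phys page map.  Descending from 'level',
 * intermediate nodes are allocated on demand; every step-aligned,
 * step-sized chunk of [*index, *index + *nb) is collapsed into a
 * single leaf at the current level, and any remainder is handled one
 * level further down. */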
static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

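/* Look up the MemoryRegionSection covering page 'index': walk down
 * from the root, consuming L2_BITS of the index per level.  A nil
 * pointer along the way means nothing was registered there, so the
 * unassigned section is returned instead. */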
MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &phys_sections[phys_section_unassigned];
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }
    return &phys_sections[lp.ptr];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#else
#define vmstate_cpu_common vmstate_dummy
#endif

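/* Return the CPU with the given index, or NULL if there is none.
 * CPUs are kept on a singly linked list threaded through each
 * CPUArchState's next_cpu field, so this is a linear scan. */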
CPUState *qemu_get_cpu(int index)
{
    CPUArchState *env = first_cpu;
    CPUState *cpu = NULL;

    while (env) {
        cpu = ENV_GET_CPU(env);
        if (cpu->cpu_index == index) {
            break;
        }
        env = env->next_cpu;
    }

    return env ? cpu : NULL;
}

void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data)
{
    CPUArchState *env = first_cpu;

    while (env) {
        func(ENV_GET_CPU(env), data);
        env = env->next_cpu;
    }
}

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint. */
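/* A watchpoint is stored as (vaddr, len_mask); len must be a power of
 * two and addr len-aligned (checked below), presumably so that a hit
 * can be tested by masking the access address: e.g. len == 4 gives
 * len_mask == ~3, covering the four bytes starting at vaddr. */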
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint. */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference. */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints. */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint. */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint. */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference. */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

void cpu_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->exit_request = 1;
    cpu->tcg_exit_req = 1;
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining. */
    new_env->next_cpu = next_cpu;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below. */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

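/* Compute the value stored in a TLB entry's iotlb field.  For RAM it
 * is the page-aligned ram_addr_t of the page OR'ed with a small
 * section index (notdirty or rom) in the low bits; for MMIO it is the
 * section's index plus the address within the section.  The packing
 * works because phys_section_add() keeps the section count below
 * TARGET_PAGE_SIZE. */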
hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, paddr);
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        iotlb = section - phys_sections;
        iotlb += memory_region_section_addr(section, paddr);
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

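/* A subpage splits a single target page among several sections: one
 * uint16_t section index is kept per byte offset within the page, so
 * regions smaller than a page (or not page-aligned) can still be
 * dispatched correctly. */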
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(hwaddr base);

static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(AddressSpaceDispatch *d)
{
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(phys_sections_nb < TARGET_PAGE_SIZE);

    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}

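/* Register a section that does not cover a whole page.  On first use
 * the page's map entry (previously unassigned, see the assert) is
 * replaced by a subpage container; later sub-page sections for the
 * same page are added to the existing container. */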
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}

static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    hwaddr addr;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    addr = start_addr;
    phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}

QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > MAX_PHYS_ADDR_SPACE_BITS)

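/* Clamp a section so that it does not extend past the maximum
 * physical address representable in the dispatch structures. */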
static MemoryRegionSection limit(MemoryRegionSection section)
{
    section.size = MIN(section.offset_within_address_space + section.size,
                       MAX_PHYS_ADDR + 1)
        - section.offset_within_address_space;

    return section;
}

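/* MemoryListener callback: add a section to the dispatch map.  The
 * section is split into an unaligned head, a run of whole pages, and
 * an unaligned tail; the head, the tail, and any pages whose offset
 * within the region is not page-aligned go through the subpage
 * machinery, while the aligned middle is registered in one go. */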
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
    MemoryRegionSection now = limit(*section), remain = limit(*section);

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(d, &now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    while (remain.size >= TARGET_PAGE_SIZE) {
        now = remain;
        if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = TARGET_PAGE_SIZE;
            register_subpage(d, &now);
        } else {
            now.size &= TARGET_PAGE_MASK;
            register_multipage(d, &now);
        }
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(d, &now);
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

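    /* Round the request up to a whole number of huge pages; e.g. with
     * 2 MiB pages (hpagesize == 0x200000), a request of 0x260000
     * bytes becomes 0x400000. */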
    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif

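/* Best-fit search for a free range of ram_addr_t space: for each
 * existing block, measure the gap between its end and the closest
 * block above it, and pick the smallest gap that still fits 'size'. */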
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

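/* Allocate a new RAM block of 'size' bytes and return its ram_addr_t
 * offset.  The backing memory is, in order of preference: the
 * caller-supplied 'host' pointer, a hugetlbfs file when -mem-path was
 * given, or anonymous memory from the Xen/KVM/host allocator. */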
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_anon_ram_alloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_ram_alloc(size);
            } else {
                new_block->host = qemu_anon_ram_alloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_anon_ram_free(block->host, block->length);
                }
#else
                abort();
#endif
            } else {
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_anon_ram_free(block->host, block->length);
                }
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
1244 }
1245}
1246#endif /* !_WIN32 */
1247
pbrookdc828ca2009-04-09 22:21:07 +00001248/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00001249 With the exception of the softmmu code in this file, this should
1250 only be used for local memory (e.g. video ram) that the device owns,
1251 and knows it isn't going to access beyond the end of the block.
1252
1253 It should not be used for general purpose DMA.
1254 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1255 */
Anthony Liguoric227f092009-10-01 16:12:16 -05001256void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00001257{
pbrook94a6b542009-04-11 17:15:54 +00001258 RAMBlock *block;
1259
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001260 /* The list is protected by the iothread lock here. */
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001261 block = ram_list.mru_block;
1262 if (block && addr - block->offset < block->length) {
1263 goto found;
1264 }
Paolo Bonzinia3161032012-11-14 15:54:48 +01001265 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Alex Williamsonf471a172010-06-11 11:11:42 -06001266 if (addr - block->offset < block->length) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001267 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001268 }
pbrook94a6b542009-04-11 17:15:54 +00001269 }
Alex Williamsonf471a172010-06-11 11:11:42 -06001270
1271 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1272 abort();
1273
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001274found:
1275 ram_list.mru_block = block;
1276 if (xen_enabled()) {
1277 /* We need to check if the requested address is in the RAM
1278 * because we don't want to map the entire memory in QEMU.
1279 * In that case just map until the end of the page.
1280 */
1281 if (block->offset == 0) {
1282 return xen_map_cache(addr, 0, 0);
1283 } else if (block->host == NULL) {
1284 block->host =
1285 xen_map_cache(block->offset, block->length, 1);
1286 }
1287 }
1288 return block->host + (addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00001289}
1290
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001291/* Return a host pointer to ram allocated with qemu_ram_alloc. Same as
1292 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
1293 *
1294 * ??? Is this still necessary?
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001295 */
Blue Swirl8b9c99d2012-10-28 11:04:51 +00001296static void *qemu_safe_ram_ptr(ram_addr_t addr)
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001297{
1298 RAMBlock *block;
1299
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001300 /* The list is protected by the iothread lock here. */
Paolo Bonzinia3161032012-11-14 15:54:48 +01001301 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001302 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02001303 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001304 /* We need to check if the requested address is in the RAM
1305 * because we don't want to map the entire memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001306 * In that case just map until the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01001307 */
1308 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001309 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01001310 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001311 block->host =
1312 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01001313 }
1314 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001315 return block->host + (addr - block->offset);
1316 }
1317 }
1318
1319 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1320 abort();
1321
1322 return NULL;
1323}
1324
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001325/* Return a host pointer to guest RAM. Similar to qemu_get_ram_ptr,
 1326 * but takes a size argument; *size is clamped to the end of the block. */
Blue Swirl8b9c99d2012-10-28 11:04:51 +00001327static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001328{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001329 if (*size == 0) {
1330 return NULL;
1331 }
Jan Kiszka868bb332011-06-21 22:59:09 +02001332 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001333 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02001334 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001335 RAMBlock *block;
1336
Paolo Bonzinia3161032012-11-14 15:54:48 +01001337 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001338 if (addr - block->offset < block->length) {
1339 if (addr - block->offset + *size > block->length)
1340 *size = block->length - addr + block->offset;
1341 return block->host + (addr - block->offset);
1342 }
1343 }
1344
1345 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1346 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001347 }
1348}
1349
Marcelo Tosattie8902612010-10-11 15:31:19 -03001350int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00001351{
pbrook94a6b542009-04-11 17:15:54 +00001352 RAMBlock *block;
1353 uint8_t *host = ptr;
1354
Jan Kiszka868bb332011-06-21 22:59:09 +02001355 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001356 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001357 return 0;
1358 }
1359
Paolo Bonzinia3161032012-11-14 15:54:48 +01001360 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001361 /* This case happens when the block is not mapped. */
1362 if (block->host == NULL) {
1363 continue;
1364 }
Alex Williamsonf471a172010-06-11 11:11:42 -06001365 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03001366 *ram_addr = block->offset + (host - block->host);
1367 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06001368 }
pbrook94a6b542009-04-11 17:15:54 +00001369 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001370
Marcelo Tosattie8902612010-10-11 15:31:19 -03001371 return -1;
1372}
Alex Williamsonf471a172010-06-11 11:11:42 -06001373
Marcelo Tosattie8902612010-10-11 15:31:19 -03001374/* Some of the softmmu routines need to translate from a host pointer
1375 (typically a TLB entry) back to a ram offset. */
1376ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
1377{
1378 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06001379
Marcelo Tosattie8902612010-10-11 15:31:19 -03001380 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
1381 fprintf(stderr, "Bad ram pointer %p\n", ptr);
1382 abort();
1383 }
1384 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00001385}
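/* Illustrative sketch, assuming a valid RAM offset: the two translations
 * above are inverses, so a round trip recovers the original offset.  The
 * helper name is hypothetical. */
#if 0
static void example_roundtrip(ram_addr_t addr)
{
    void *host = qemu_get_ram_ptr(addr);
    assert(qemu_ram_addr_from_host_nofail(host) == addr);
}
#endif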
1386
Avi Kivitya8170e52012-10-23 12:30:10 +02001387static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001388 unsigned size)
bellard33417e72003-08-10 21:47:01 +00001389{
pbrook67d3b952006-12-18 05:03:52 +00001390#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00001391 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00001392#endif
Richard Henderson5b450402011-04-18 16:13:12 -07001393#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001394 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00001395#endif
1396 return 0;
1397}
1398
Avi Kivitya8170e52012-10-23 12:30:10 +02001399static void unassigned_mem_write(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001400 uint64_t val, unsigned size)
blueswir1e18231a2008-10-06 18:46:28 +00001401{
1402#ifdef DEBUG_UNASSIGNED
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001403 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
blueswir1e18231a2008-10-06 18:46:28 +00001404#endif
Richard Henderson5b450402011-04-18 16:13:12 -07001405#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001406 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00001407#endif
1408}
1409
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001410static const MemoryRegionOps unassigned_mem_ops = {
1411 .read = unassigned_mem_read,
1412 .write = unassigned_mem_write,
1413 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00001414};
1415
Avi Kivitya8170e52012-10-23 12:30:10 +02001416static uint64_t error_mem_read(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001417 unsigned size)
1418{
1419 abort();
1420}
1421
Avi Kivitya8170e52012-10-23 12:30:10 +02001422static void error_mem_write(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001423 uint64_t value, unsigned size)
1424{
1425 abort();
1426}
1427
1428static const MemoryRegionOps error_mem_ops = {
1429 .read = error_mem_read,
1430 .write = error_mem_write,
1431 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00001432};
1433
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001434static const MemoryRegionOps rom_mem_ops = {
1435 .read = error_mem_read,
1436 .write = unassigned_mem_write,
1437 .endianness = DEVICE_NATIVE_ENDIAN,
1438};
1439
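/* Explanatory note (added for clarity): the notdirty region backs RAM pages
 * whose CODE_DIRTY_FLAG is still clear.  A write dispatched here first
 * invalidates any translated code for the page, then performs the store and
 * sets the dirty flags, so subsequent writes can go straight to RAM. */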
Avi Kivitya8170e52012-10-23 12:30:10 +02001440static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001441 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001442{
bellard3a7d9292005-08-21 09:26:42 +00001443 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001444 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00001445 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1446#if !defined(CONFIG_USER_ONLY)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001447 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001448 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00001449#endif
1450 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001451 switch (size) {
1452 case 1:
1453 stb_p(qemu_get_ram_ptr(ram_addr), val);
1454 break;
1455 case 2:
1456 stw_p(qemu_get_ram_ptr(ram_addr), val);
1457 break;
1458 case 4:
1459 stl_p(qemu_get_ram_ptr(ram_addr), val);
1460 break;
1461 default:
1462 abort();
1463 }
bellardf23db162005-08-21 19:12:28 +00001464 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001465 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00001466 /* we remove the notdirty callback only if the code has been
1467 flushed */
1468 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00001469 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00001470}
1471
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001472static const MemoryRegionOps notdirty_mem_ops = {
1473 .read = error_mem_read,
1474 .write = notdirty_mem_write,
1475 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00001476};
1477
pbrook0f459d12008-06-09 00:20:13 +00001478/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00001479static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00001480{
Andreas Färber9349b4f2012-03-14 01:38:32 +01001481 CPUArchState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00001482 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00001483 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00001484 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00001485 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00001486
aliguori06d55cc2008-11-18 20:24:06 +00001487 if (env->watchpoint_hit) {
1488 /* We re-entered the check after replacing the TB. Now raise
 1489 * the debug interrupt so that it will trigger after the
1490 * current instruction. */
Andreas Färberc3affe52013-01-18 15:03:43 +01001491 cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00001492 return;
1493 }
pbrook2e70f6e2008-06-29 01:03:05 +00001494 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00001495 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001496 if ((vaddr == (wp->vaddr & len_mask) ||
1497 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00001498 wp->flags |= BP_WATCHPOINT_HIT;
1499 if (!env->watchpoint_hit) {
1500 env->watchpoint_hit = wp;
Blue Swirl5a316522012-12-02 21:28:09 +00001501 tb_check_watchpoint(env);
aliguori6e140f22008-11-18 20:37:55 +00001502 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1503 env->exception_index = EXCP_DEBUG;
Max Filippov488d6572012-01-29 02:24:39 +04001504 cpu_loop_exit(env);
aliguori6e140f22008-11-18 20:37:55 +00001505 } else {
1506 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1507 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
Max Filippov488d6572012-01-29 02:24:39 +04001508 cpu_resume_from_signal(env, NULL);
aliguori6e140f22008-11-18 20:37:55 +00001509 }
aliguori06d55cc2008-11-18 20:24:06 +00001510 }
aliguori6e140f22008-11-18 20:37:55 +00001511 } else {
1512 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00001513 }
1514 }
1515}
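/* Illustrative sketch, not from this file: the path above is armed by
 * inserting a watchpoint on a guest virtual address.  The 4-byte length and
 * write-only flags below are hypothetical. */
#if 0
static void example_watch_word(CPUArchState *env, target_ulong vaddr)
{
    cpu_watchpoint_insert(env, vaddr, 4, BP_MEM_WRITE, NULL);
}
#endif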
1516
pbrook6658ffb2007-03-16 23:58:11 +00001517/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1518 so these check for a hit then pass through to the normal out-of-line
1519 phys routines. */
Avi Kivitya8170e52012-10-23 12:30:10 +02001520static uint64_t watch_mem_read(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001521 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001522{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001523 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1524 switch (size) {
1525 case 1: return ldub_phys(addr);
1526 case 2: return lduw_phys(addr);
1527 case 4: return ldl_phys(addr);
1528 default: abort();
1529 }
pbrook6658ffb2007-03-16 23:58:11 +00001530}
1531
Avi Kivitya8170e52012-10-23 12:30:10 +02001532static void watch_mem_write(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001533 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001534{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001535 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1536 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04001537 case 1:
1538 stb_phys(addr, val);
1539 break;
1540 case 2:
1541 stw_phys(addr, val);
1542 break;
1543 case 4:
1544 stl_phys(addr, val);
1545 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02001546 default: abort();
1547 }
pbrook6658ffb2007-03-16 23:58:11 +00001548}
1549
Avi Kivity1ec9b902012-01-02 12:47:48 +02001550static const MemoryRegionOps watch_mem_ops = {
1551 .read = watch_mem_read,
1552 .write = watch_mem_write,
1553 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00001554};
pbrook6658ffb2007-03-16 23:58:11 +00001555
Avi Kivitya8170e52012-10-23 12:30:10 +02001556static uint64_t subpage_read(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001557 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001558{
Avi Kivity70c68e42012-01-02 12:32:48 +02001559 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07001560 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02001561 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00001562#if defined(DEBUG_SUBPAGE)
1563 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
1564 mmio, len, addr, idx);
1565#endif
blueswir1db7b5422007-05-26 17:36:03 +00001566
Avi Kivity5312bd82012-02-12 18:32:55 +02001567 section = &phys_sections[mmio->sub_section[idx]];
1568 addr += mmio->base;
1569 addr -= section->offset_within_address_space;
1570 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02001571 return io_mem_read(section->mr, addr, len);
blueswir1db7b5422007-05-26 17:36:03 +00001572}
1573
Avi Kivitya8170e52012-10-23 12:30:10 +02001574static void subpage_write(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001575 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001576{
Avi Kivity70c68e42012-01-02 12:32:48 +02001577 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07001578 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02001579 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00001580#if defined(DEBUG_SUBPAGE)
Avi Kivity70c68e42012-01-02 12:32:48 +02001581 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
1582 " idx %d value %"PRIx64"\n",
Richard Hendersonf6405242010-04-22 16:47:31 -07001583 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00001584#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07001585
Avi Kivity5312bd82012-02-12 18:32:55 +02001586 section = &phys_sections[mmio->sub_section[idx]];
1587 addr += mmio->base;
1588 addr -= section->offset_within_address_space;
1589 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02001590 io_mem_write(section->mr, addr, value, len);
blueswir1db7b5422007-05-26 17:36:03 +00001591}
1592
Avi Kivity70c68e42012-01-02 12:32:48 +02001593static const MemoryRegionOps subpage_ops = {
1594 .read = subpage_read,
1595 .write = subpage_write,
1596 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00001597};
1598
Avi Kivitya8170e52012-10-23 12:30:10 +02001599static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
Avi Kivityde712f92012-01-02 12:41:07 +02001600 unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01001601{
1602 ram_addr_t raddr = addr;
1603 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02001604 switch (size) {
1605 case 1: return ldub_p(ptr);
1606 case 2: return lduw_p(ptr);
1607 case 4: return ldl_p(ptr);
1608 default: abort();
1609 }
Andreas Färber56384e82011-11-30 16:26:21 +01001610}
1611
Avi Kivitya8170e52012-10-23 12:30:10 +02001612static void subpage_ram_write(void *opaque, hwaddr addr,
Avi Kivityde712f92012-01-02 12:41:07 +02001613 uint64_t value, unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01001614{
1615 ram_addr_t raddr = addr;
1616 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02001617 switch (size) {
1618 case 1: return stb_p(ptr, value);
1619 case 2: return stw_p(ptr, value);
1620 case 4: return stl_p(ptr, value);
1621 default: abort();
1622 }
Andreas Färber56384e82011-11-30 16:26:21 +01001623}
1624
Avi Kivityde712f92012-01-02 12:41:07 +02001625static const MemoryRegionOps subpage_ram_ops = {
1626 .read = subpage_ram_read,
1627 .write = subpage_ram_write,
1628 .endianness = DEVICE_NATIVE_ENDIAN,
Andreas Färber56384e82011-11-30 16:26:21 +01001629};
1630
Anthony Liguoric227f092009-10-01 16:12:16 -05001631static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02001632 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00001633{
1634 int idx, eidx;
1635
1636 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1637 return -1;
1638 idx = SUBPAGE_IDX(start);
1639 eidx = SUBPAGE_IDX(end);
1640#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00001641 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
blueswir1db7b5422007-05-26 17:36:03 +00001642 __func__, mmio, start, end, idx, eidx, section);
1643#endif
Avi Kivity5312bd82012-02-12 18:32:55 +02001644 if (memory_region_is_ram(phys_sections[section].mr)) {
1645 MemoryRegionSection new_section = phys_sections[section];
1646 new_section.mr = &io_mem_subpage_ram;
1647 section = phys_section_add(&new_section);
Andreas Färber56384e82011-11-30 16:26:21 +01001648 }
blueswir1db7b5422007-05-26 17:36:03 +00001649 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02001650 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00001651 }
1652
1653 return 0;
1654}
1655
Avi Kivitya8170e52012-10-23 12:30:10 +02001656static subpage_t *subpage_init(hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00001657{
Anthony Liguoric227f092009-10-01 16:12:16 -05001658 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00001659
Anthony Liguori7267c092011-08-20 22:09:37 -05001660 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00001661
1662 mmio->base = base;
Avi Kivity70c68e42012-01-02 12:32:48 +02001663 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
1664 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02001665 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00001666#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00001667 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
 1668 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00001669#endif
Avi Kivity0f0cb162012-02-13 17:14:32 +02001670 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
blueswir1db7b5422007-05-26 17:36:03 +00001671
1672 return mmio;
1673}
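/* Explanatory note (added for clarity): a subpage stands in for a whole
 * target page whose contents are split between several MemoryRegionSections;
 * each registered range in sub_section[] redirects the access to the section
 * that really owns that offset, defaulting to the unassigned section. */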
1674
Avi Kivity5312bd82012-02-12 18:32:55 +02001675static uint16_t dummy_section(MemoryRegion *mr)
1676{
1677 MemoryRegionSection section = {
1678 .mr = mr,
1679 .offset_within_address_space = 0,
1680 .offset_within_region = 0,
1681 .size = UINT64_MAX,
1682 };
1683
1684 return phys_section_add(&section);
1685}
1686
Avi Kivitya8170e52012-10-23 12:30:10 +02001687MemoryRegion *iotlb_to_region(hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02001688{
Avi Kivity37ec01d2012-03-08 18:08:35 +02001689 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02001690}
1691
Avi Kivitye9179ce2009-06-14 11:38:52 +03001692static void io_mem_init(void)
1693{
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001694 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001695 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
1696 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
1697 "unassigned", UINT64_MAX);
1698 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
1699 "notdirty", UINT64_MAX);
Avi Kivityde712f92012-01-02 12:41:07 +02001700 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
1701 "subpage-ram", UINT64_MAX);
Avi Kivity1ec9b902012-01-02 12:47:48 +02001702 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
1703 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03001704}
1705
Avi Kivityac1970f2012-10-03 16:22:53 +02001706static void mem_begin(MemoryListener *listener)
1707{
1708 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
1709
1710 destroy_all_mappings(d);
1711 d->phys_map.ptr = PHYS_MAP_NODE_NIL;
1712}
1713
Avi Kivity50c1e142012-02-08 21:36:02 +02001714static void core_begin(MemoryListener *listener)
1715{
Avi Kivity5312bd82012-02-12 18:32:55 +02001716 phys_sections_clear();
1717 phys_section_unassigned = dummy_section(&io_mem_unassigned);
Avi Kivityaa102232012-03-08 17:06:55 +02001718 phys_section_notdirty = dummy_section(&io_mem_notdirty);
1719 phys_section_rom = dummy_section(&io_mem_rom);
1720 phys_section_watch = dummy_section(&io_mem_watch);
Avi Kivity50c1e142012-02-08 21:36:02 +02001721}
1722
Avi Kivity1d711482012-10-02 18:54:45 +02001723static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02001724{
Andreas Färber9349b4f2012-03-14 01:38:32 +01001725 CPUArchState *env;
Avi Kivity117712c2012-02-12 21:23:17 +02001726
1727 /* since each CPU stores ram addresses in its TLB cache, we must
1728 reset the modified entries */
1729 /* XXX: slow ! */
1730 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1731 tlb_flush(env, 1);
1732 }
Avi Kivity50c1e142012-02-08 21:36:02 +02001733}
1734
Avi Kivity93632742012-02-08 16:54:16 +02001735static void core_log_global_start(MemoryListener *listener)
1736{
1737 cpu_physical_memory_set_dirty_tracking(1);
1738}
1739
1740static void core_log_global_stop(MemoryListener *listener)
1741{
1742 cpu_physical_memory_set_dirty_tracking(0);
1743}
1744
Avi Kivity4855d412012-02-08 21:16:05 +02001745static void io_region_add(MemoryListener *listener,
1746 MemoryRegionSection *section)
1747{
Avi Kivitya2d33522012-03-05 17:40:12 +02001748 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
1749
1750 mrio->mr = section->mr;
1751 mrio->offset = section->offset_within_region;
1752 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
Avi Kivity4855d412012-02-08 21:16:05 +02001753 section->offset_within_address_space, section->size);
Avi Kivitya2d33522012-03-05 17:40:12 +02001754 ioport_register(&mrio->iorange);
Avi Kivity4855d412012-02-08 21:16:05 +02001755}
1756
1757static void io_region_del(MemoryListener *listener,
1758 MemoryRegionSection *section)
1759{
1760 isa_unassign_ioport(section->offset_within_address_space, section->size);
1761}
1762
Avi Kivity93632742012-02-08 16:54:16 +02001763static MemoryListener core_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02001764 .begin = core_begin,
Avi Kivity93632742012-02-08 16:54:16 +02001765 .log_global_start = core_log_global_start,
1766 .log_global_stop = core_log_global_stop,
Avi Kivityac1970f2012-10-03 16:22:53 +02001767 .priority = 1,
Avi Kivity93632742012-02-08 16:54:16 +02001768};
1769
Avi Kivity4855d412012-02-08 21:16:05 +02001770static MemoryListener io_memory_listener = {
1771 .region_add = io_region_add,
1772 .region_del = io_region_del,
Avi Kivity4855d412012-02-08 21:16:05 +02001773 .priority = 0,
1774};
1775
Avi Kivity1d711482012-10-02 18:54:45 +02001776static MemoryListener tcg_memory_listener = {
1777 .commit = tcg_commit,
1778};
1779
Avi Kivityac1970f2012-10-03 16:22:53 +02001780void address_space_init_dispatch(AddressSpace *as)
1781{
1782 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1783
1784 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1785 d->listener = (MemoryListener) {
1786 .begin = mem_begin,
1787 .region_add = mem_add,
1788 .region_nop = mem_add,
1789 .priority = 0,
1790 };
1791 as->dispatch = d;
1792 memory_listener_register(&d->listener, as);
1793}
1794
Avi Kivity83f3c252012-10-07 12:59:55 +02001795void address_space_destroy_dispatch(AddressSpace *as)
1796{
1797 AddressSpaceDispatch *d = as->dispatch;
1798
1799 memory_listener_unregister(&d->listener);
1800 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
1801 g_free(d);
1802 as->dispatch = NULL;
1803}
1804
Avi Kivity62152b82011-07-26 14:26:14 +03001805static void memory_map_init(void)
1806{
Anthony Liguori7267c092011-08-20 22:09:37 -05001807 system_memory = g_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03001808 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity2673a5d2012-10-02 18:49:28 +02001809 address_space_init(&address_space_memory, system_memory);
1810 address_space_memory.name = "memory";
Avi Kivity309cb472011-08-08 16:09:03 +03001811
Anthony Liguori7267c092011-08-20 22:09:37 -05001812 system_io = g_malloc(sizeof(*system_io));
Avi Kivity309cb472011-08-08 16:09:03 +03001813 memory_region_init(system_io, "io", 65536);
Avi Kivity2673a5d2012-10-02 18:49:28 +02001814 address_space_init(&address_space_io, system_io);
1815 address_space_io.name = "I/O";
Avi Kivity93632742012-02-08 16:54:16 +02001816
Avi Kivityf6790af2012-10-02 20:13:51 +02001817 memory_listener_register(&core_memory_listener, &address_space_memory);
1818 memory_listener_register(&io_memory_listener, &address_space_io);
1819 memory_listener_register(&tcg_memory_listener, &address_space_memory);
Peter Maydell9e119082012-10-29 11:34:32 +10001820
1821 dma_context_init(&dma_context_memory, &address_space_memory,
1822 NULL, NULL, NULL);
Avi Kivity62152b82011-07-26 14:26:14 +03001823}
1824
1825MemoryRegion *get_system_memory(void)
1826{
1827 return system_memory;
1828}
1829
Avi Kivity309cb472011-08-08 16:09:03 +03001830MemoryRegion *get_system_io(void)
1831{
1832 return system_io;
1833}
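/* Illustrative sketch, hypothetical device: boards attach their regions to
 * the roots returned above, e.g. a memory-mapped device at a fixed base. */
#if 0
static void example_attach(MemoryRegion *dev_mr, hwaddr base)
{
    memory_region_add_subregion(get_system_memory(), base, dev_mr);
}
#endif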
1834
pbrooke2eef172008-06-08 01:09:01 +00001835#endif /* !defined(CONFIG_USER_ONLY) */
1836
bellard13eb76e2004-01-24 15:23:36 +00001837/* physical memory access (slow version, mainly for debug) */
1838#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01001839int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00001840 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00001841{
1842 int l, flags;
1843 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00001844 void * p;
bellard13eb76e2004-01-24 15:23:36 +00001845
1846 while (len > 0) {
1847 page = addr & TARGET_PAGE_MASK;
1848 l = (page + TARGET_PAGE_SIZE) - addr;
1849 if (l > len)
1850 l = len;
1851 flags = page_get_flags(page);
1852 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00001853 return -1;
bellard13eb76e2004-01-24 15:23:36 +00001854 if (is_write) {
1855 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00001856 return -1;
bellard579a97f2007-11-11 14:26:47 +00001857 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001858 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00001859 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001860 memcpy(p, buf, l);
1861 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00001862 } else {
1863 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00001864 return -1;
bellard579a97f2007-11-11 14:26:47 +00001865 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001866 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00001867 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001868 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00001869 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00001870 }
1871 len -= l;
1872 buf += l;
1873 addr += l;
1874 }
Paul Brooka68fe892010-03-01 00:08:59 +00001875 return 0;
bellard13eb76e2004-01-24 15:23:36 +00001876}
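/* (Note added for clarity: the gdbstub uses cpu_memory_rw_debug() to peek
 * and poke guest memory, which is why invalid pages make it return -1
 * instead of aborting.) */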
bellard8df1cd02005-01-28 22:37:22 +00001877
bellard13eb76e2004-01-24 15:23:36 +00001878#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001879
Avi Kivitya8170e52012-10-23 12:30:10 +02001880static void invalidate_and_set_dirty(hwaddr addr,
1881 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001882{
1883 if (!cpu_physical_memory_is_dirty(addr)) {
1884 /* invalidate code */
1885 tb_invalidate_phys_page_range(addr, addr + length, 0);
1886 /* set dirty bit */
1887 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1888 }
Anthony PERARDe2269392012-10-03 13:49:22 +00001889 xen_modified_memory(addr, length);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001890}
1891
Avi Kivitya8170e52012-10-23 12:30:10 +02001892void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02001893 int len, bool is_write)
bellard13eb76e2004-01-24 15:23:36 +00001894{
Avi Kivityac1970f2012-10-03 16:22:53 +02001895 AddressSpaceDispatch *d = as->dispatch;
Avi Kivity37ec01d2012-03-08 18:08:35 +02001896 int l;
bellard13eb76e2004-01-24 15:23:36 +00001897 uint8_t *ptr;
1898 uint32_t val;
Avi Kivitya8170e52012-10-23 12:30:10 +02001899 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02001900 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00001901
bellard13eb76e2004-01-24 15:23:36 +00001902 while (len > 0) {
1903 page = addr & TARGET_PAGE_MASK;
1904 l = (page + TARGET_PAGE_SIZE) - addr;
1905 if (l > len)
1906 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02001907 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00001908
bellard13eb76e2004-01-24 15:23:36 +00001909 if (is_write) {
Avi Kivityf3705d52012-03-08 16:16:34 +02001910 if (!memory_region_is_ram(section->mr)) {
Avi Kivitya8170e52012-10-23 12:30:10 +02001911 hwaddr addr1;
Blue Swirlcc5bea62012-04-14 14:56:48 +00001912 addr1 = memory_region_section_addr(section, addr);
bellard6a00d602005-11-21 23:25:50 +00001913 /* XXX: could force cpu_single_env to NULL to avoid
1914 potential bugs */
aurel326c2934d2009-02-18 21:37:17 +00001915 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00001916 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001917 val = ldl_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02001918 io_mem_write(section->mr, addr1, val, 4);
bellard13eb76e2004-01-24 15:23:36 +00001919 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00001920 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00001921 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001922 val = lduw_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02001923 io_mem_write(section->mr, addr1, val, 2);
bellard13eb76e2004-01-24 15:23:36 +00001924 l = 2;
1925 } else {
bellard1c213d12005-09-03 10:49:04 +00001926 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001927 val = ldub_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02001928 io_mem_write(section->mr, addr1, val, 1);
bellard13eb76e2004-01-24 15:23:36 +00001929 l = 1;
1930 }
Avi Kivityf3705d52012-03-08 16:16:34 +02001931 } else if (!section->readonly) {
Anthony PERARD8ca56922011-07-15 04:32:53 +00001932 ram_addr_t addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02001933 addr1 = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00001934 + memory_region_section_addr(section, addr);
bellard13eb76e2004-01-24 15:23:36 +00001935 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00001936 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00001937 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001938 invalidate_and_set_dirty(addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00001939 }
1940 } else {
Blue Swirlcc5bea62012-04-14 14:56:48 +00001941 if (!(memory_region_is_ram(section->mr) ||
1942 memory_region_is_romd(section->mr))) {
Avi Kivitya8170e52012-10-23 12:30:10 +02001943 hwaddr addr1;
bellard13eb76e2004-01-24 15:23:36 +00001944 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00001945 addr1 = memory_region_section_addr(section, addr);
aurel326c2934d2009-02-18 21:37:17 +00001946 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00001947 /* 32 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02001948 val = io_mem_read(section->mr, addr1, 4);
bellardc27004e2005-01-03 23:35:10 +00001949 stl_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00001950 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00001951 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00001952 /* 16 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02001953 val = io_mem_read(section->mr, addr1, 2);
bellardc27004e2005-01-03 23:35:10 +00001954 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00001955 l = 2;
1956 } else {
bellard1c213d12005-09-03 10:49:04 +00001957 /* 8 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02001958 val = io_mem_read(section->mr, addr1, 1);
bellardc27004e2005-01-03 23:35:10 +00001959 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00001960 l = 1;
1961 }
1962 } else {
1963 /* RAM case */
Anthony PERARD0a1b3572012-03-19 15:54:34 +00001964 ptr = qemu_get_ram_ptr(section->mr->ram_addr
Blue Swirlcc5bea62012-04-14 14:56:48 +00001965 + memory_region_section_addr(section,
1966 addr));
Avi Kivityf3705d52012-03-08 16:16:34 +02001967 memcpy(buf, ptr, l);
bellard13eb76e2004-01-24 15:23:36 +00001968 }
1969 }
1970 len -= l;
1971 buf += l;
1972 addr += l;
1973 }
1974}
bellard8df1cd02005-01-28 22:37:22 +00001975
Avi Kivitya8170e52012-10-23 12:30:10 +02001976void address_space_write(AddressSpace *as, hwaddr addr,
Avi Kivityac1970f2012-10-03 16:22:53 +02001977 const uint8_t *buf, int len)
1978{
1979 address_space_rw(as, addr, (uint8_t *)buf, len, true);
1980}
1981
1982/**
1983 * address_space_read: read from an address space.
1984 *
1985 * @as: #AddressSpace to be accessed
1986 * @addr: address within that address space
 1987 * @buf: buffer into which the read data is stored
 * @len: length of the transfer in bytes
1988 */
Avi Kivitya8170e52012-10-23 12:30:10 +02001989void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02001990{
1991 address_space_rw(as, addr, buf, len, false);
1992}
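/* Illustrative sketch: a 4-byte guest-physical read through the generic
 * path.  The helper name is hypothetical. */
#if 0
static uint32_t example_read32(hwaddr addr)
{
    uint8_t buf[4];
    address_space_read(&address_space_memory, addr, buf, 4);
    return ldl_p(buf);   /* interpret the bytes in target memory order */
}
#endif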
1993
1994
Avi Kivitya8170e52012-10-23 12:30:10 +02001995void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02001996 int len, int is_write)
1997{
1998 return address_space_rw(&address_space_memory, addr, buf, len, is_write);
1999}
2000
bellardd0ecd2a2006-04-23 17:14:48 +00002001/* used for ROM loading : can write in RAM and ROM */
Avi Kivitya8170e52012-10-23 12:30:10 +02002002void cpu_physical_memory_write_rom(hwaddr addr,
bellardd0ecd2a2006-04-23 17:14:48 +00002003 const uint8_t *buf, int len)
2004{
Avi Kivityac1970f2012-10-03 16:22:53 +02002005 AddressSpaceDispatch *d = address_space_memory.dispatch;
bellardd0ecd2a2006-04-23 17:14:48 +00002006 int l;
2007 uint8_t *ptr;
Avi Kivitya8170e52012-10-23 12:30:10 +02002008 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02002009 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00002010
bellardd0ecd2a2006-04-23 17:14:48 +00002011 while (len > 0) {
2012 page = addr & TARGET_PAGE_MASK;
2013 l = (page + TARGET_PAGE_SIZE) - addr;
2014 if (l > len)
2015 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02002016 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002017
Blue Swirlcc5bea62012-04-14 14:56:48 +00002018 if (!(memory_region_is_ram(section->mr) ||
2019 memory_region_is_romd(section->mr))) {
bellardd0ecd2a2006-04-23 17:14:48 +00002020 /* do nothing */
2021 } else {
2022 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02002023 addr1 = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002024 + memory_region_section_addr(section, addr);
bellardd0ecd2a2006-04-23 17:14:48 +00002025 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002026 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002027 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002028 invalidate_and_set_dirty(addr1, l);
bellardd0ecd2a2006-04-23 17:14:48 +00002029 }
2030 len -= l;
2031 buf += l;
2032 addr += l;
2033 }
2034}
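/* Illustrative sketch: firmware loaders rely on this helper to populate ROM
 * regions that reject ordinary stores.  The base address and blob are
 * hypothetical. */
#if 0
static void example_load_firmware(const uint8_t *blob, int size)
{
    cpu_physical_memory_write_rom(0xfffc0000, blob, size);
}
#endif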
2035
aliguori6d16c2f2009-01-22 16:59:11 +00002036typedef struct {
2037 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002038 hwaddr addr;
2039 hwaddr len;
aliguori6d16c2f2009-01-22 16:59:11 +00002040} BounceBuffer;
2041
2042static BounceBuffer bounce;
2043
aliguoriba223c22009-01-22 16:59:16 +00002044typedef struct MapClient {
2045 void *opaque;
2046 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00002047 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002048} MapClient;
2049
Blue Swirl72cf2d42009-09-12 07:36:22 +00002050static QLIST_HEAD(map_client_list, MapClient) map_client_list
2051 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002052
2053void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2054{
Anthony Liguori7267c092011-08-20 22:09:37 -05002055 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00002056
2057 client->opaque = opaque;
2058 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002059 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00002060 return client;
2061}
2062
Blue Swirl8b9c99d2012-10-28 11:04:51 +00002063static void cpu_unregister_map_client(void *_client)
aliguoriba223c22009-01-22 16:59:16 +00002064{
2065 MapClient *client = (MapClient *)_client;
2066
Blue Swirl72cf2d42009-09-12 07:36:22 +00002067 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002068 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002069}
2070
2071static void cpu_notify_map_clients(void)
2072{
2073 MapClient *client;
2074
Blue Swirl72cf2d42009-09-12 07:36:22 +00002075 while (!QLIST_EMPTY(&map_client_list)) {
2076 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002077 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09002078 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00002079 }
2080}
2081
aliguori6d16c2f2009-01-22 16:59:11 +00002082/* Map a physical memory region into a host virtual address.
2083 * May map a subset of the requested range, given by and returned in *plen.
2084 * May return NULL if resources needed to perform the mapping are exhausted.
2085 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002086 * Use cpu_register_map_client() to know when retrying the map operation is
2087 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002088 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002089void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002090 hwaddr addr,
2091 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002092 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002093{
Avi Kivityac1970f2012-10-03 16:22:53 +02002094 AddressSpaceDispatch *d = as->dispatch;
Avi Kivitya8170e52012-10-23 12:30:10 +02002095 hwaddr len = *plen;
2096 hwaddr todo = 0;
aliguori6d16c2f2009-01-22 16:59:11 +00002097 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02002098 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02002099 MemoryRegionSection *section;
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002100 ram_addr_t raddr = RAM_ADDR_MAX;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002101 ram_addr_t rlen;
2102 void *ret;
aliguori6d16c2f2009-01-22 16:59:11 +00002103
2104 while (len > 0) {
2105 page = addr & TARGET_PAGE_MASK;
2106 l = (page + TARGET_PAGE_SIZE) - addr;
2107 if (l > len)
2108 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02002109 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
aliguori6d16c2f2009-01-22 16:59:11 +00002110
Avi Kivityf3705d52012-03-08 16:16:34 +02002111 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002112 if (todo || bounce.buffer) {
aliguori6d16c2f2009-01-22 16:59:11 +00002113 break;
2114 }
2115 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2116 bounce.addr = addr;
2117 bounce.len = l;
2118 if (!is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002119 address_space_read(as, addr, bounce.buffer, l);
aliguori6d16c2f2009-01-22 16:59:11 +00002120 }
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002121
2122 *plen = l;
2123 return bounce.buffer;
aliguori6d16c2f2009-01-22 16:59:11 +00002124 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002125 if (!todo) {
Avi Kivityf3705d52012-03-08 16:16:34 +02002126 raddr = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002127 + memory_region_section_addr(section, addr);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002128 }
aliguori6d16c2f2009-01-22 16:59:11 +00002129
2130 len -= l;
2131 addr += l;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002132 todo += l;
aliguori6d16c2f2009-01-22 16:59:11 +00002133 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002134 rlen = todo;
2135 ret = qemu_ram_ptr_length(raddr, &rlen);
2136 *plen = rlen;
2137 return ret;
aliguori6d16c2f2009-01-22 16:59:11 +00002138}
2139
Avi Kivityac1970f2012-10-03 16:22:53 +02002140/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002141 * Will also mark the memory as dirty if is_write == 1. access_len gives
2142 * the amount of memory that was actually read or written by the caller.
2143 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002144void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2145 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002146{
2147 if (buffer != bounce.buffer) {
2148 if (is_write) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03002149 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002150 while (access_len) {
2151 unsigned l;
2152 l = TARGET_PAGE_SIZE;
2153 if (l > access_len)
2154 l = access_len;
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002155 invalidate_and_set_dirty(addr1, l);
aliguori6d16c2f2009-01-22 16:59:11 +00002156 addr1 += l;
2157 access_len -= l;
2158 }
2159 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002160 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002161 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002162 }
aliguori6d16c2f2009-01-22 16:59:11 +00002163 return;
2164 }
2165 if (is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002166 address_space_write(as, bounce.addr, bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002167 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002168 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002169 bounce.buffer = NULL;
aliguoriba223c22009-01-22 16:59:16 +00002170 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002171}
bellardd0ecd2a2006-04-23 17:14:48 +00002172
Avi Kivitya8170e52012-10-23 12:30:10 +02002173void *cpu_physical_memory_map(hwaddr addr,
2174 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002175 int is_write)
2176{
2177 return address_space_map(&address_space_memory, addr, plen, is_write);
2178}
2179
Avi Kivitya8170e52012-10-23 12:30:10 +02002180void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2181 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002182{
2183 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2184}
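/* Illustrative sketch of the map/unmap discipline for zero-copy DMA,
 * including the retry path when the single bounce buffer is busy.  The
 * callback and helper names are hypothetical. */
#if 0
static void example_retry_cb(void *opaque);

static void example_dma_out(hwaddr addr, const uint8_t *data, hwaddr len)
{
    hwaddr plen = len;
    void *p = cpu_physical_memory_map(addr, &plen, 1);
    if (!p) {
        /* Mapping resources exhausted: ask to be called back, then retry. */
        cpu_register_map_client(NULL, example_retry_cb);
        return;
    }
    memcpy(p, data, plen);                  /* plen may be less than len */
    cpu_physical_memory_unmap(p, plen, 1, plen);
}
#endif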
2185
bellard8df1cd02005-01-28 22:37:22 +00002186/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002187static inline uint32_t ldl_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002188 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002189{
bellard8df1cd02005-01-28 22:37:22 +00002190 uint8_t *ptr;
2191 uint32_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02002192 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00002193
Avi Kivityac1970f2012-10-03 16:22:53 +02002194 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002195
Blue Swirlcc5bea62012-04-14 14:56:48 +00002196 if (!(memory_region_is_ram(section->mr) ||
2197 memory_region_is_romd(section->mr))) {
bellard8df1cd02005-01-28 22:37:22 +00002198 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00002199 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02002200 val = io_mem_read(section->mr, addr, 4);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002201#if defined(TARGET_WORDS_BIGENDIAN)
2202 if (endian == DEVICE_LITTLE_ENDIAN) {
2203 val = bswap32(val);
2204 }
2205#else
2206 if (endian == DEVICE_BIG_ENDIAN) {
2207 val = bswap32(val);
2208 }
2209#endif
bellard8df1cd02005-01-28 22:37:22 +00002210 } else {
2211 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02002212 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002213 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002214 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002215 switch (endian) {
2216 case DEVICE_LITTLE_ENDIAN:
2217 val = ldl_le_p(ptr);
2218 break;
2219 case DEVICE_BIG_ENDIAN:
2220 val = ldl_be_p(ptr);
2221 break;
2222 default:
2223 val = ldl_p(ptr);
2224 break;
2225 }
bellard8df1cd02005-01-28 22:37:22 +00002226 }
2227 return val;
2228}
2229
Avi Kivitya8170e52012-10-23 12:30:10 +02002230uint32_t ldl_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002231{
2232 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2233}
2234
Avi Kivitya8170e52012-10-23 12:30:10 +02002235uint32_t ldl_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002236{
2237 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2238}
2239
Avi Kivitya8170e52012-10-23 12:30:10 +02002240uint32_t ldl_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002241{
2242 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2243}
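/* Illustrative sketch: callers pick the accessor matching the device or bus
 * endianness instead of the target default.  The register offset is
 * hypothetical. */
#if 0
static uint32_t example_read_le_reg(hwaddr base)
{
    return ldl_le_phys(base + 0x10);
}
#endif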
2244
bellard84b7b8e2005-11-28 21:19:04 +00002245/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002246static inline uint64_t ldq_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002247 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00002248{
bellard84b7b8e2005-11-28 21:19:04 +00002249 uint8_t *ptr;
2250 uint64_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02002251 MemoryRegionSection *section;
bellard84b7b8e2005-11-28 21:19:04 +00002252
Avi Kivityac1970f2012-10-03 16:22:53 +02002253 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002254
Blue Swirlcc5bea62012-04-14 14:56:48 +00002255 if (!(memory_region_is_ram(section->mr) ||
2256 memory_region_is_romd(section->mr))) {
bellard84b7b8e2005-11-28 21:19:04 +00002257 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00002258 addr = memory_region_section_addr(section, addr);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002259
2260 /* XXX This is broken when device endian != cpu endian.
 2261 Fix this by honouring the "endian" argument, as the 32-bit accessors do. */
bellard84b7b8e2005-11-28 21:19:04 +00002262#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivity37ec01d2012-03-08 18:08:35 +02002263 val = io_mem_read(section->mr, addr, 4) << 32;
2264 val |= io_mem_read(section->mr, addr + 4, 4);
bellard84b7b8e2005-11-28 21:19:04 +00002265#else
Avi Kivity37ec01d2012-03-08 18:08:35 +02002266 val = io_mem_read(section->mr, addr, 4);
2267 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
bellard84b7b8e2005-11-28 21:19:04 +00002268#endif
2269 } else {
2270 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02002271 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002272 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002273 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002274 switch (endian) {
2275 case DEVICE_LITTLE_ENDIAN:
2276 val = ldq_le_p(ptr);
2277 break;
2278 case DEVICE_BIG_ENDIAN:
2279 val = ldq_be_p(ptr);
2280 break;
2281 default:
2282 val = ldq_p(ptr);
2283 break;
2284 }
bellard84b7b8e2005-11-28 21:19:04 +00002285 }
2286 return val;
2287}
2288
Avi Kivitya8170e52012-10-23 12:30:10 +02002289uint64_t ldq_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002290{
2291 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2292}
2293
Avi Kivitya8170e52012-10-23 12:30:10 +02002294uint64_t ldq_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002295{
2296 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2297}
2298
Avi Kivitya8170e52012-10-23 12:30:10 +02002299uint64_t ldq_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002300{
2301 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2302}

/* XXX: optimize */
uint32_t ldub_phys(hwaddr addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
                               & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
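
/*
 * Illustrative sketch, an assumption rather than code from this file:
 * a target MMU helper that sets accessed/dirty bits in a guest page
 * table entry wants exactly these semantics, updating the PTE without
 * flagging the RAM page that holds it:
 *
 *     uint32_t pte = ldl_phys(pte_addr);        // pte_addr is hypothetical
 *     stl_phys_notdirty(pte_addr, pte | PTE_ACCESSED);
 *
 * (PTE_ACCESSED stands in for a target-specific flag bit.)
 */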

/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stb_phys(hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
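
/*
 * Illustrative sketch, not from the original file: a device whose
 * guest-visible structures are defined as little-endian stores 64-bit
 * fields with the _le_ variant, making the result independent of host
 * and target byte order; stq_phys instead tswap64()s the value into
 * the target's native order:
 *
 *     stq_le_phys(ring_gpa + 8, idx);   // ring_gpa and idx are hypothetical
 */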

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page is mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
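
/*
 * Illustrative sketch (assumption): this is the sort of call a debugger
 * front end such as QEMU's gdb stub makes to read guest virtual memory;
 * the loop above translates page by page via cpu_get_phys_page_debug():
 *
 *     uint8_t insn[4];
 *     if (cpu_memory_rw_debug(env, pc, insn, sizeof(insn), 0) < 0) {
 *         // report the unmapped address back to the debugger
 *     }
 */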
#endif

#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             phys_addr >> TARGET_PAGE_BITS);

    return !(memory_region_is_ram(section->mr) ||
             memory_region_is_romd(section->mr));
}
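
/*
 * Illustrative sketch (assumption): code that walks guest physical
 * memory, such as a crash-dump writer, can use this predicate to avoid
 * reads that would trigger MMIO side effects:
 *
 *     if (!cpu_physical_memory_is_io(paddr)) {          // paddr hypothetical
 *         cpu_physical_memory_read(paddr, buf, len);    // buf/len hypothetical
 *     }
 */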
#endif