/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_UNASSIGNED
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;
DMAContext dma_context_memory;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *, cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
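
/* PHYS_MAP_NODE_NIL is (((uint16_t)~0) >> 1) == 0x7fff: the is_leaf flag
   takes one bit of a PhysPageEntry, leaving 15 bits for ptr, so 0x7fff can
   never be a valid node index (phys_map_node_alloc() asserts as much) and
   serves as the radix tree's "null pointer". */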

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}
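
/* Lookup sketch: each level of the tree consumes L2_BITS of the page
 * index, top level first, exactly as in the loop above:
 *
 *     lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
 *
 * A leaf reached before level 0 covers a naturally aligned, power-of-two
 * sized run of pages (this is how phys_page_set_level() compresses large
 * mappings), and an index that falls off the tree resolves to the
 * phys_section_unassigned section rather than to NULL.
 */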

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#else
#define vmstate_cpu_common vmstate_dummy
#endif

CPUState *qemu_get_cpu(int index)
{
    CPUArchState *env = first_cpu;
    CPUState *cpu = NULL;

    while (env) {
        cpu = ENV_GET_CPU(env);
        if (cpu->cpu_index == index) {
            break;
        }
        env = env->next_cpu;
    }

    return env ? cpu : NULL;
}

void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data)
{
    CPUArchState *env = first_cpu;

    while (env) {
        func(ENV_GET_CPU(env), data);
        env = env->next_cpu;
    }
}
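
/* Usage sketch (hypothetical caller): tallying CPUs with the iterator
 * above, since the CPU list is a singly linked chain:
 *
 *     static void count_one(CPUState *cpu, void *data)
 *     {
 *         (*(int *)data)++;
 *     }
 *
 *     int ncpus = 0;
 *     qemu_for_each_cpu(count_one, &ncpus);
 */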

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
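
/* Usage sketch (hypothetical): a 4-byte write watchpoint injected on
 * behalf of the GDB stub.  len must be a power of two no larger than
 * TARGET_PAGE_SIZE and addr must be aligned to it, or -EINVAL comes back:
 *
 *     CPUWatchpoint *wp;
 *     if (cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE | BP_GDB, &wp)) {
 *         // report failure to the debugger
 *     }
 */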

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}
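
/* Usage sketch (hypothetical): the GDB stub path boils down to
 *
 *     cpu_breakpoint_insert(env, pc, BP_GDB, NULL);
 *     ...
 *     cpu_breakpoint_remove(env, pc, BP_GDB);
 *
 * breakpoint_invalidate() throws away any translated block containing pc,
 * so the next execution retranslates with the breakpoint check in place.
 */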

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

void cpu_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->exit_request = 1;
    cpu->tcg_exit_req = 1;
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining. */
    new_env->next_cpu = next_cpu;

    /* Clone all break/watchpoints.
       The copy's lists must be reinitialized: the memcpy above left them
       aliasing the source CPU's entries.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&new_env->breakpoints);
    QTAILQ_INIT(&new_env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, paddr);
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        iotlb = section - phys_sections;
        iotlb += memory_region_section_addr(section, paddr);
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
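
/* The returned iotlb value is an encoding trick: for RAM it is the
 * page-aligned ram_addr with a small section index (notdirty or rom)
 * OR-ed into the low bits, while for MMIO it is the bare index of the
 * section within phys_sections[] plus the intra-section offset.
 * phys_section_add() asserts phys_sections_nb < TARGET_PAGE_SIZE
 * precisely so the two encodings cannot collide.
 */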
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(hwaddr base);
static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(AddressSpaceDispatch *d)
{
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(phys_sections_nb < TARGET_PAGE_SIZE);

    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}

static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    hwaddr addr;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    addr = start_addr;
    phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
    MemoryRegionSection now = *section, remain = *section;

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(d, &now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    while (remain.size >= TARGET_PAGE_SIZE) {
        now = remain;
        if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = TARGET_PAGE_SIZE;
            register_subpage(d, &now);
        } else {
            now.size &= TARGET_PAGE_MASK;
            register_multipage(d, &now);
        }
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(d, &now);
    }
}
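
/* Example split, assuming 4 KiB target pages and a page-aligned
 * offset_within_region: a section covering [0x2000, 0x4800) is
 * registered as
 *
 *     [0x2000, 0x4000)  multipage (whole pages, straight into the map)
 *     [0x4000, 0x4800)  subpage   (partial tail)
 *
 * and a section that also starts mid-page gets a subpage head first.
 * Partial pages are dispatched through a subpage_t's per-byte
 * sub_section[] table rather than through the phys map itself.
 */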

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif
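
/* This hugetlbfs path backs guest RAM with an unlinked temp file when the
 * user passes -mem-path, e.g. (hypothetical invocation):
 *
 *     qemu-system-x86_64 -m 4096 -mem-path /dev/hugepages ...
 *
 * gethugepagesize() reports the mount's huge page size via f_bsize, the
 * request is rounded up to a multiple of it, and the unlinked file keeps
 * its pages alive only as long as the mapping (or the fd) does.
 */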

static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
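
/* Best-fit illustration: with existing blocks at [0, 0x1000) and
 * [0x3000, 0x4000), a 0x1000-byte request returns offset 0x1000 (the
 * 0x2000-byte gap between the blocks is the smallest gap that fits),
 * rather than appending past the last block.
 */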

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_anon_ram_alloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_ram_alloc(size);
            } else {
                new_block->host = qemu_anon_ram_alloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}
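
/* Usage sketch (hypothetical): device code normally reaches this through
 * the memory API rather than calling it directly, roughly
 *
 *     memory_region_init_ram(&mr, "mydev.ram", size);
 *
 * which allocates through qemu_ram_alloc().  qemu_ram_alloc_from_ptr() is
 * the variant for memory the caller already mapped (host != NULL); such a
 * block is tagged RAM_PREALLOC_MASK so qemu_ram_free() will not try to
 * unmap it.
 */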

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_anon_ram_free(block->host, block->length);
                }
#else
                abort();
#endif
            } else {
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_anon_ram_free(block->host, block->length);
                }
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
1244 and knows it isn't going to access beyond the end of the block.
1245
1246 It should not be used for general purpose DMA.
1247 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1248 */
Anthony Liguoric227f092009-10-01 16:12:16 -05001249void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00001250{
pbrook94a6b542009-04-11 17:15:54 +00001251 RAMBlock *block;
1252
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001253 /* The list is protected by the iothread lock here. */
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001254 block = ram_list.mru_block;
1255 if (block && addr - block->offset < block->length) {
1256 goto found;
1257 }
Paolo Bonzinia3161032012-11-14 15:54:48 +01001258 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Alex Williamsonf471a172010-06-11 11:11:42 -06001259 if (addr - block->offset < block->length) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001260 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001261 }
pbrook94a6b542009-04-11 17:15:54 +00001262 }
Alex Williamsonf471a172010-06-11 11:11:42 -06001263
1264 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1265 abort();
1266
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001267found:
1268 ram_list.mru_block = block;
1269 if (xen_enabled()) {
1270 /* We need to check if the requested address is in the RAM
1271 * because we don't want to map the entire memory in QEMU.
1272 * In that case just map until the end of the page.
1273 */
1274 if (block->offset == 0) {
1275 return xen_map_cache(addr, 0, 0);
1276 } else if (block->host == NULL) {
1277 block->host =
1278 xen_map_cache(block->offset, block->length, 1);
1279 }
1280 }
1281 return block->host + (addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00001282}
1283
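/* Usage sketch (illustrative): per the comment above, callers may only
 * touch memory the device owns and must stay inside one block, e.g.
 * clearing the first page of a block whose qemu_ram_alloc() offset
 * ("offset" below) they kept around:
 *
 *     uint8_t *p = qemu_get_ram_ptr(offset);
 *     memset(p, 0, TARGET_PAGE_SIZE);
 */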
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001284/* Return a host pointer to ram allocated with qemu_ram_alloc. Same as
1285 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
1286 *
1287 * ??? Is this still necessary?
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001288 */
Blue Swirl8b9c99d2012-10-28 11:04:51 +00001289static void *qemu_safe_ram_ptr(ram_addr_t addr)
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001290{
1291 RAMBlock *block;
1292
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001293 /* The list is protected by the iothread lock here. */
Paolo Bonzinia3161032012-11-14 15:54:48 +01001294 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001295 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02001296 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001297 /* We need to check if the requested address is in the RAM
1298 * because we don't want to map the entire memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001299 * In that case just map until the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01001300 */
1301 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001302 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01001303 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001304 block->host =
1305 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01001306 }
1307 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001308 return block->host + (addr - block->offset);
1309 }
1310 }
1311
1312 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1313 abort();
1314
1315 return NULL;
1316}
1317
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001318/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1319 * but takes a size argument */
Blue Swirl8b9c99d2012-10-28 11:04:51 +00001320static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001321{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001322 if (*size == 0) {
1323 return NULL;
1324 }
Jan Kiszka868bb332011-06-21 22:59:09 +02001325 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001326 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02001327 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001328 RAMBlock *block;
1329
Paolo Bonzinia3161032012-11-14 15:54:48 +01001330 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001331 if (addr - block->offset < block->length) {
1332 if (addr - block->offset + *size > block->length)
1333 *size = block->length - addr + block->offset;
1334 return block->host + (addr - block->offset);
1335 }
1336 }
1337
1338 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1339 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001340 }
1341}
1342
Marcelo Tosattie8902612010-10-11 15:31:19 -03001343int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00001344{
pbrook94a6b542009-04-11 17:15:54 +00001345 RAMBlock *block;
1346 uint8_t *host = ptr;
1347
Jan Kiszka868bb332011-06-21 22:59:09 +02001348 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001349 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001350 return 0;
1351 }
1352
Paolo Bonzinia3161032012-11-14 15:54:48 +01001353 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001354 /* This case appears when the block is not mapped. */
1355 if (block->host == NULL) {
1356 continue;
1357 }
Alex Williamsonf471a172010-06-11 11:11:42 -06001358 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03001359 *ram_addr = block->offset + (host - block->host);
1360 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06001361 }
pbrook94a6b542009-04-11 17:15:54 +00001362 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001363
Marcelo Tosattie8902612010-10-11 15:31:19 -03001364 return -1;
1365}
Alex Williamsonf471a172010-06-11 11:11:42 -06001366
Marcelo Tosattie8902612010-10-11 15:31:19 -03001367/* Some of the softmmu routines need to translate from a host pointer
1368 (typically a TLB entry) back to a ram offset. */
1369ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
1370{
1371 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06001372
Marcelo Tosattie8902612010-10-11 15:31:19 -03001373 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
1374 fprintf(stderr, "Bad ram pointer %p\n", ptr);
1375 abort();
1376 }
1377 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00001378}
1379
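/* Usage sketch (illustrative): the translation above is the inverse of
 * qemu_get_ram_ptr(), so a host pointer derived from a ram_addr_t
 * round-trips back to the same offset:
 *
 *     void *host = qemu_get_ram_ptr(offset);
 *     assert(qemu_ram_addr_from_host_nofail(host) == offset);
 */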
Avi Kivitya8170e52012-10-23 12:30:10 +02001380static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001381 unsigned size)
bellard33417e72003-08-10 21:47:01 +00001382{
pbrook67d3b952006-12-18 05:03:52 +00001383#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00001384 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00001385#endif
Richard Henderson5b450402011-04-18 16:13:12 -07001386#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001387 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00001388#endif
1389 return 0;
1390}
1391
Avi Kivitya8170e52012-10-23 12:30:10 +02001392static void unassigned_mem_write(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001393 uint64_t val, unsigned size)
blueswir1e18231a2008-10-06 18:46:28 +00001394{
1395#ifdef DEBUG_UNASSIGNED
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001396 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
blueswir1e18231a2008-10-06 18:46:28 +00001397#endif
Richard Henderson5b450402011-04-18 16:13:12 -07001398#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001399 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00001400#endif
1401}
1402
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001403static const MemoryRegionOps unassigned_mem_ops = {
1404 .read = unassigned_mem_read,
1405 .write = unassigned_mem_write,
1406 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00001407};
1408
Avi Kivitya8170e52012-10-23 12:30:10 +02001409static uint64_t error_mem_read(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001410 unsigned size)
1411{
1412 abort();
1413}
1414
Avi Kivitya8170e52012-10-23 12:30:10 +02001415static void error_mem_write(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001416 uint64_t value, unsigned size)
1417{
1418 abort();
1419}
1420
1421static const MemoryRegionOps error_mem_ops = {
1422 .read = error_mem_read,
1423 .write = error_mem_write,
1424 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00001425};
1426
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001427static const MemoryRegionOps rom_mem_ops = {
1428 .read = error_mem_read,
1429 .write = unassigned_mem_write,
1430 .endianness = DEVICE_NATIVE_ENDIAN,
1431};
1432
Avi Kivitya8170e52012-10-23 12:30:10 +02001433static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001434 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001435{
bellard3a7d9292005-08-21 09:26:42 +00001436 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001437 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00001438 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1439#if !defined(CONFIG_USER_ONLY)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001440 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001441 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00001442#endif
1443 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001444 switch (size) {
1445 case 1:
1446 stb_p(qemu_get_ram_ptr(ram_addr), val);
1447 break;
1448 case 2:
1449 stw_p(qemu_get_ram_ptr(ram_addr), val);
1450 break;
1451 case 4:
1452 stl_p(qemu_get_ram_ptr(ram_addr), val);
1453 break;
1454 default:
1455 abort();
1456 }
bellardf23db162005-08-21 19:12:28 +00001457 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001458 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00001459 /* we remove the notdirty callback only if the code has been
1460 flushed */
1461 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00001462 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00001463}
1464
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001465static const MemoryRegionOps notdirty_mem_ops = {
1466 .read = error_mem_read,
1467 .write = notdirty_mem_write,
1468 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00001469};
1470
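/* Usage sketch (illustrative): notdirty_mem_write() above is what sets
 * the per-page dirty bits; a consumer such as live migration might scan
 * and clear them roughly as below, assuming this era's dirty bitmap API
 * (cpu_physical_memory_get_dirty/_reset_dirty, MIGRATION_DIRTY_FLAG);
 * send_page() is a made-up helper:
 *
 *     if (cpu_physical_memory_get_dirty(addr, TARGET_PAGE_SIZE,
 *                                       MIGRATION_DIRTY_FLAG)) {
 *         send_page(addr);
 *         cpu_physical_memory_reset_dirty(addr, addr + TARGET_PAGE_SIZE,
 *                                         MIGRATION_DIRTY_FLAG);
 *     }
 */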
pbrook0f459d12008-06-09 00:20:13 +00001471/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00001472static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00001473{
Andreas Färber9349b4f2012-03-14 01:38:32 +01001474 CPUArchState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00001475 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00001476 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00001477 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00001478 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00001479
aliguori06d55cc2008-11-18 20:24:06 +00001480 if (env->watchpoint_hit) {
1481 /* We re-entered the check after replacing the TB. Now raise
1482 * the debug interrupt so that it will trigger after the
1483 * current instruction. */
Andreas Färberc3affe52013-01-18 15:03:43 +01001484 cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00001485 return;
1486 }
pbrook2e70f6e2008-06-29 01:03:05 +00001487 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00001488 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001489 if ((vaddr == (wp->vaddr & len_mask) ||
1490 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00001491 wp->flags |= BP_WATCHPOINT_HIT;
1492 if (!env->watchpoint_hit) {
1493 env->watchpoint_hit = wp;
Blue Swirl5a316522012-12-02 21:28:09 +00001494 tb_check_watchpoint(env);
aliguori6e140f22008-11-18 20:37:55 +00001495 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1496 env->exception_index = EXCP_DEBUG;
Max Filippov488d6572012-01-29 02:24:39 +04001497 cpu_loop_exit(env);
aliguori6e140f22008-11-18 20:37:55 +00001498 } else {
1499 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1500 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
Max Filippov488d6572012-01-29 02:24:39 +04001501 cpu_resume_from_signal(env, NULL);
aliguori6e140f22008-11-18 20:37:55 +00001502 }
aliguori06d55cc2008-11-18 20:24:06 +00001503 }
aliguori6e140f22008-11-18 20:37:55 +00001504 } else {
1505 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00001506 }
1507 }
1508}
1509
pbrook6658ffb2007-03-16 23:58:11 +00001510/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1511 so these check for a hit then pass through to the normal out-of-line
1512 phys routines. */
Avi Kivitya8170e52012-10-23 12:30:10 +02001513static uint64_t watch_mem_read(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001514 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001515{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001516 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1517 switch (size) {
1518 case 1: return ldub_phys(addr);
1519 case 2: return lduw_phys(addr);
1520 case 4: return ldl_phys(addr);
1521 default: abort();
1522 }
pbrook6658ffb2007-03-16 23:58:11 +00001523}
1524
Avi Kivitya8170e52012-10-23 12:30:10 +02001525static void watch_mem_write(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001526 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001527{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001528 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1529 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04001530 case 1:
1531 stb_phys(addr, val);
1532 break;
1533 case 2:
1534 stw_phys(addr, val);
1535 break;
1536 case 4:
1537 stl_phys(addr, val);
1538 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02001539 default: abort();
1540 }
pbrook6658ffb2007-03-16 23:58:11 +00001541}
1542
Avi Kivity1ec9b902012-01-02 12:47:48 +02001543static const MemoryRegionOps watch_mem_ops = {
1544 .read = watch_mem_read,
1545 .write = watch_mem_write,
1546 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00001547};
pbrook6658ffb2007-03-16 23:58:11 +00001548
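/* Usage sketch (illustrative): a debugger front end arms these routines
 * by inserting a watchpoint; the TLB then funnels matching accesses
 * through watch_mem_read/watch_mem_write above. Assuming this era's
 * cpu_watchpoint_insert() signature:
 *
 *     CPUWatchpoint *wp;
 *     cpu_watchpoint_insert(env, vaddr, 4, BP_MEM_WRITE | BP_GDB, &wp);
 */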
Avi Kivitya8170e52012-10-23 12:30:10 +02001549static uint64_t subpage_read(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001550 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001551{
Avi Kivity70c68e42012-01-02 12:32:48 +02001552 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07001553 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02001554 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00001555#if defined(DEBUG_SUBPAGE)
1556 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
1557 mmio, len, addr, idx);
1558#endif
blueswir1db7b5422007-05-26 17:36:03 +00001559
Avi Kivity5312bd82012-02-12 18:32:55 +02001560 section = &phys_sections[mmio->sub_section[idx]];
1561 addr += mmio->base;
1562 addr -= section->offset_within_address_space;
1563 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02001564 return io_mem_read(section->mr, addr, len);
blueswir1db7b5422007-05-26 17:36:03 +00001565}
1566
Avi Kivitya8170e52012-10-23 12:30:10 +02001567static void subpage_write(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001568 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001569{
Avi Kivity70c68e42012-01-02 12:32:48 +02001570 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07001571 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02001572 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00001573#if defined(DEBUG_SUBPAGE)
Avi Kivity70c68e42012-01-02 12:32:48 +02001574 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
1575 " idx %d value %"PRIx64"\n",
Richard Hendersonf6405242010-04-22 16:47:31 -07001576 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00001577#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07001578
Avi Kivity5312bd82012-02-12 18:32:55 +02001579 section = &phys_sections[mmio->sub_section[idx]];
1580 addr += mmio->base;
1581 addr -= section->offset_within_address_space;
1582 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02001583 io_mem_write(section->mr, addr, value, len);
blueswir1db7b5422007-05-26 17:36:03 +00001584}
1585
Avi Kivity70c68e42012-01-02 12:32:48 +02001586static const MemoryRegionOps subpage_ops = {
1587 .read = subpage_read,
1588 .write = subpage_write,
1589 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00001590};
1591
Avi Kivitya8170e52012-10-23 12:30:10 +02001592static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
Avi Kivityde712f92012-01-02 12:41:07 +02001593 unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01001594{
1595 ram_addr_t raddr = addr;
1596 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02001597 switch (size) {
1598 case 1: return ldub_p(ptr);
1599 case 2: return lduw_p(ptr);
1600 case 4: return ldl_p(ptr);
1601 default: abort();
1602 }
Andreas Färber56384e82011-11-30 16:26:21 +01001603}
1604
Avi Kivitya8170e52012-10-23 12:30:10 +02001605static void subpage_ram_write(void *opaque, hwaddr addr,
Avi Kivityde712f92012-01-02 12:41:07 +02001606 uint64_t value, unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01001607{
1608 ram_addr_t raddr = addr;
1609 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02001610 switch (size) {
1611 case 1: return stb_p(ptr, value);
1612 case 2: return stw_p(ptr, value);
1613 case 4: return stl_p(ptr, value);
1614 default: abort();
1615 }
Andreas Färber56384e82011-11-30 16:26:21 +01001616}
1617
Avi Kivityde712f92012-01-02 12:41:07 +02001618static const MemoryRegionOps subpage_ram_ops = {
1619 .read = subpage_ram_read,
1620 .write = subpage_ram_write,
1621 .endianness = DEVICE_NATIVE_ENDIAN,
Andreas Färber56384e82011-11-30 16:26:21 +01001622};
1623
Anthony Liguoric227f092009-10-01 16:12:16 -05001624static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02001625 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00001626{
1627 int idx, eidx;
1628
1629 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1630 return -1;
1631 idx = SUBPAGE_IDX(start);
1632 eidx = SUBPAGE_IDX(end);
1633#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00001634 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
blueswir1db7b5422007-05-26 17:36:03 +00001635 __func__, mmio, start, end, idx, eidx, section);
1636#endif
Avi Kivity5312bd82012-02-12 18:32:55 +02001637 if (memory_region_is_ram(phys_sections[section].mr)) {
1638 MemoryRegionSection new_section = phys_sections[section];
1639 new_section.mr = &io_mem_subpage_ram;
1640 section = phys_section_add(&new_section);
Andreas Färber56384e82011-11-30 16:26:21 +01001641 }
blueswir1db7b5422007-05-26 17:36:03 +00001642 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02001643 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00001644 }
1645
1646 return 0;
1647}
1648
Avi Kivitya8170e52012-10-23 12:30:10 +02001649static subpage_t *subpage_init(hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00001650{
Anthony Liguoric227f092009-10-01 16:12:16 -05001651 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00001652
Anthony Liguori7267c092011-08-20 22:09:37 -05001653 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00001654
1655 mmio->base = base;
Avi Kivity70c68e42012-01-02 12:32:48 +02001656 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
1657 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02001658 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00001659#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00001660 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1661 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00001662#endif
Avi Kivity0f0cb162012-02-13 17:14:32 +02001663 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
blueswir1db7b5422007-05-26 17:36:03 +00001664
1665 return mmio;
1666}
1667
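/* Illustrative note: subpages let a single target page be split between
 * regions that are not page-aligned, e.g. a small MMIO window in the
 * middle of a page. Registration is sketched below with made-up values;
 * unregistered ranges keep the unassigned section installed by
 * subpage_init() above:
 *
 *     subpage_t *sp = subpage_init(base);
 *     subpage_register(sp, 0x100, 0x1ff, mmio_section);
 */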
Avi Kivity5312bd82012-02-12 18:32:55 +02001668static uint16_t dummy_section(MemoryRegion *mr)
1669{
1670 MemoryRegionSection section = {
1671 .mr = mr,
1672 .offset_within_address_space = 0,
1673 .offset_within_region = 0,
1674 .size = UINT64_MAX,
1675 };
1676
1677 return phys_section_add(&section);
1678}
1679
Avi Kivitya8170e52012-10-23 12:30:10 +02001680MemoryRegion *iotlb_to_region(hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02001681{
Avi Kivity37ec01d2012-03-08 18:08:35 +02001682 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02001683}
1684
Avi Kivitye9179ce2009-06-14 11:38:52 +03001685static void io_mem_init(void)
1686{
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001687 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001688 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
1689 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
1690 "unassigned", UINT64_MAX);
1691 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
1692 "notdirty", UINT64_MAX);
Avi Kivityde712f92012-01-02 12:41:07 +02001693 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
1694 "subpage-ram", UINT64_MAX);
Avi Kivity1ec9b902012-01-02 12:47:48 +02001695 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
1696 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03001697}
1698
Avi Kivityac1970f2012-10-03 16:22:53 +02001699static void mem_begin(MemoryListener *listener)
1700{
1701 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
1702
1703 destroy_all_mappings(d);
1704 d->phys_map.ptr = PHYS_MAP_NODE_NIL;
1705}
1706
Avi Kivity50c1e142012-02-08 21:36:02 +02001707static void core_begin(MemoryListener *listener)
1708{
Avi Kivity5312bd82012-02-12 18:32:55 +02001709 phys_sections_clear();
1710 phys_section_unassigned = dummy_section(&io_mem_unassigned);
Avi Kivityaa102232012-03-08 17:06:55 +02001711 phys_section_notdirty = dummy_section(&io_mem_notdirty);
1712 phys_section_rom = dummy_section(&io_mem_rom);
1713 phys_section_watch = dummy_section(&io_mem_watch);
Avi Kivity50c1e142012-02-08 21:36:02 +02001714}
1715
Avi Kivity1d711482012-10-02 18:54:45 +02001716static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02001717{
Andreas Färber9349b4f2012-03-14 01:38:32 +01001718 CPUArchState *env;
Avi Kivity117712c2012-02-12 21:23:17 +02001719
1720 /* since each CPU stores ram addresses in its TLB cache, we must
1721 reset the modified entries */
1722 /* XXX: slow ! */
1723 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1724 tlb_flush(env, 1);
1725 }
Avi Kivity50c1e142012-02-08 21:36:02 +02001726}
1727
Avi Kivity93632742012-02-08 16:54:16 +02001728static void core_log_global_start(MemoryListener *listener)
1729{
1730 cpu_physical_memory_set_dirty_tracking(1);
1731}
1732
1733static void core_log_global_stop(MemoryListener *listener)
1734{
1735 cpu_physical_memory_set_dirty_tracking(0);
1736}
1737
Avi Kivity4855d412012-02-08 21:16:05 +02001738static void io_region_add(MemoryListener *listener,
1739 MemoryRegionSection *section)
1740{
Avi Kivitya2d33522012-03-05 17:40:12 +02001741 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
1742
1743 mrio->mr = section->mr;
1744 mrio->offset = section->offset_within_region;
1745 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
Avi Kivity4855d412012-02-08 21:16:05 +02001746 section->offset_within_address_space, section->size);
Avi Kivitya2d33522012-03-05 17:40:12 +02001747 ioport_register(&mrio->iorange);
Avi Kivity4855d412012-02-08 21:16:05 +02001748}
1749
1750static void io_region_del(MemoryListener *listener,
1751 MemoryRegionSection *section)
1752{
1753 isa_unassign_ioport(section->offset_within_address_space, section->size);
1754}
1755
Avi Kivity93632742012-02-08 16:54:16 +02001756static MemoryListener core_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02001757 .begin = core_begin,
Avi Kivity93632742012-02-08 16:54:16 +02001758 .log_global_start = core_log_global_start,
1759 .log_global_stop = core_log_global_stop,
Avi Kivityac1970f2012-10-03 16:22:53 +02001760 .priority = 1,
Avi Kivity93632742012-02-08 16:54:16 +02001761};
1762
Avi Kivity4855d412012-02-08 21:16:05 +02001763static MemoryListener io_memory_listener = {
1764 .region_add = io_region_add,
1765 .region_del = io_region_del,
Avi Kivity4855d412012-02-08 21:16:05 +02001766 .priority = 0,
1767};
1768
Avi Kivity1d711482012-10-02 18:54:45 +02001769static MemoryListener tcg_memory_listener = {
1770 .commit = tcg_commit,
1771};
1772
Avi Kivityac1970f2012-10-03 16:22:53 +02001773void address_space_init_dispatch(AddressSpace *as)
1774{
1775 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1776
1777 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1778 d->listener = (MemoryListener) {
1779 .begin = mem_begin,
1780 .region_add = mem_add,
1781 .region_nop = mem_add,
1782 .priority = 0,
1783 };
1784 as->dispatch = d;
1785 memory_listener_register(&d->listener, as);
1786}
1787
Avi Kivity83f3c252012-10-07 12:59:55 +02001788void address_space_destroy_dispatch(AddressSpace *as)
1789{
1790 AddressSpaceDispatch *d = as->dispatch;
1791
1792 memory_listener_unregister(&d->listener);
1793 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
1794 g_free(d);
1795 as->dispatch = NULL;
1796}
1797
Avi Kivity62152b82011-07-26 14:26:14 +03001798static void memory_map_init(void)
1799{
Anthony Liguori7267c092011-08-20 22:09:37 -05001800 system_memory = g_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03001801 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity2673a5d2012-10-02 18:49:28 +02001802 address_space_init(&address_space_memory, system_memory);
1803 address_space_memory.name = "memory";
Avi Kivity309cb472011-08-08 16:09:03 +03001804
Anthony Liguori7267c092011-08-20 22:09:37 -05001805 system_io = g_malloc(sizeof(*system_io));
Avi Kivity309cb472011-08-08 16:09:03 +03001806 memory_region_init(system_io, "io", 65536);
Avi Kivity2673a5d2012-10-02 18:49:28 +02001807 address_space_init(&address_space_io, system_io);
1808 address_space_io.name = "I/O";
Avi Kivity93632742012-02-08 16:54:16 +02001809
Avi Kivityf6790af2012-10-02 20:13:51 +02001810 memory_listener_register(&core_memory_listener, &address_space_memory);
1811 memory_listener_register(&io_memory_listener, &address_space_io);
1812 memory_listener_register(&tcg_memory_listener, &address_space_memory);
Peter Maydell9e119082012-10-29 11:34:32 +10001813
1814 dma_context_init(&dma_context_memory, &address_space_memory,
1815 NULL, NULL, NULL);
Avi Kivity62152b82011-07-26 14:26:14 +03001816}
1817
1818MemoryRegion *get_system_memory(void)
1819{
1820 return system_memory;
1821}
1822
Avi Kivity309cb472011-08-08 16:09:03 +03001823MemoryRegion *get_system_io(void)
1824{
1825 return system_io;
1826}
1827
pbrooke2eef172008-06-08 01:09:01 +00001828#endif /* !defined(CONFIG_USER_ONLY) */
1829
bellard13eb76e2004-01-24 15:23:36 +00001830/* physical memory access (slow version, mainly for debug) */
1831#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01001832int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00001833 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00001834{
1835 int l, flags;
1836 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00001837 void * p;
bellard13eb76e2004-01-24 15:23:36 +00001838
1839 while (len > 0) {
1840 page = addr & TARGET_PAGE_MASK;
1841 l = (page + TARGET_PAGE_SIZE) - addr;
1842 if (l > len)
1843 l = len;
1844 flags = page_get_flags(page);
1845 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00001846 return -1;
bellard13eb76e2004-01-24 15:23:36 +00001847 if (is_write) {
1848 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00001849 return -1;
bellard579a97f2007-11-11 14:26:47 +00001850 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001851 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00001852 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001853 memcpy(p, buf, l);
1854 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00001855 } else {
1856 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00001857 return -1;
bellard579a97f2007-11-11 14:26:47 +00001858 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001859 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00001860 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001861 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00001862 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00001863 }
1864 len -= l;
1865 buf += l;
1866 addr += l;
1867 }
Paul Brooka68fe892010-03-01 00:08:59 +00001868 return 0;
bellard13eb76e2004-01-24 15:23:36 +00001869}
bellard8df1cd02005-01-28 22:37:22 +00001870
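/* Usage sketch (illustrative): the gdbstub reads guest memory through
 * this function, e.g. while servicing a memory-read packet:
 *
 *     if (cpu_memory_rw_debug(env, addr, buf, len, 0) != 0) {
 *         ...report the failed access to the debugger...
 *     }
 */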
bellard13eb76e2004-01-24 15:23:36 +00001871#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001872
Avi Kivitya8170e52012-10-23 12:30:10 +02001873static void invalidate_and_set_dirty(hwaddr addr,
1874 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001875{
1876 if (!cpu_physical_memory_is_dirty(addr)) {
1877 /* invalidate code */
1878 tb_invalidate_phys_page_range(addr, addr + length, 0);
1879 /* set dirty bit */
1880 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1881 }
Anthony PERARDe2269392012-10-03 13:49:22 +00001882 xen_modified_memory(addr, length);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001883}
1884
Avi Kivitya8170e52012-10-23 12:30:10 +02001885void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02001886 int len, bool is_write)
bellard13eb76e2004-01-24 15:23:36 +00001887{
Avi Kivityac1970f2012-10-03 16:22:53 +02001888 AddressSpaceDispatch *d = as->dispatch;
Avi Kivity37ec01d2012-03-08 18:08:35 +02001889 int l;
bellard13eb76e2004-01-24 15:23:36 +00001890 uint8_t *ptr;
1891 uint32_t val;
Avi Kivitya8170e52012-10-23 12:30:10 +02001892 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02001893 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00001894
bellard13eb76e2004-01-24 15:23:36 +00001895 while (len > 0) {
1896 page = addr & TARGET_PAGE_MASK;
1897 l = (page + TARGET_PAGE_SIZE) - addr;
1898 if (l > len)
1899 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02001900 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00001901
bellard13eb76e2004-01-24 15:23:36 +00001902 if (is_write) {
Avi Kivityf3705d52012-03-08 16:16:34 +02001903 if (!memory_region_is_ram(section->mr)) {
Avi Kivitya8170e52012-10-23 12:30:10 +02001904 hwaddr addr1;
Blue Swirlcc5bea62012-04-14 14:56:48 +00001905 addr1 = memory_region_section_addr(section, addr);
bellard6a00d602005-11-21 23:25:50 +00001906 /* XXX: could force cpu_single_env to NULL to avoid
1907 potential bugs */
aurel326c2934d2009-02-18 21:37:17 +00001908 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00001909 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001910 val = ldl_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02001911 io_mem_write(section->mr, addr1, val, 4);
bellard13eb76e2004-01-24 15:23:36 +00001912 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00001913 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00001914 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001915 val = lduw_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02001916 io_mem_write(section->mr, addr1, val, 2);
bellard13eb76e2004-01-24 15:23:36 +00001917 l = 2;
1918 } else {
bellard1c213d12005-09-03 10:49:04 +00001919 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001920 val = ldub_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02001921 io_mem_write(section->mr, addr1, val, 1);
bellard13eb76e2004-01-24 15:23:36 +00001922 l = 1;
1923 }
Avi Kivityf3705d52012-03-08 16:16:34 +02001924 } else if (!section->readonly) {
Anthony PERARD8ca56922011-07-15 04:32:53 +00001925 ram_addr_t addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02001926 addr1 = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00001927 + memory_region_section_addr(section, addr);
bellard13eb76e2004-01-24 15:23:36 +00001928 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00001929 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00001930 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001931 invalidate_and_set_dirty(addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00001932 }
1933 } else {
Blue Swirlcc5bea62012-04-14 14:56:48 +00001934 if (!(memory_region_is_ram(section->mr) ||
1935 memory_region_is_romd(section->mr))) {
Avi Kivitya8170e52012-10-23 12:30:10 +02001936 hwaddr addr1;
bellard13eb76e2004-01-24 15:23:36 +00001937 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00001938 addr1 = memory_region_section_addr(section, addr);
aurel326c2934d2009-02-18 21:37:17 +00001939 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00001940 /* 32 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02001941 val = io_mem_read(section->mr, addr1, 4);
bellardc27004e2005-01-03 23:35:10 +00001942 stl_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00001943 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00001944 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00001945 /* 16 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02001946 val = io_mem_read(section->mr, addr1, 2);
bellardc27004e2005-01-03 23:35:10 +00001947 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00001948 l = 2;
1949 } else {
bellard1c213d12005-09-03 10:49:04 +00001950 /* 8 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02001951 val = io_mem_read(section->mr, addr1, 1);
bellardc27004e2005-01-03 23:35:10 +00001952 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00001953 l = 1;
1954 }
1955 } else {
1956 /* RAM case */
Anthony PERARD0a1b3572012-03-19 15:54:34 +00001957 ptr = qemu_get_ram_ptr(section->mr->ram_addr
Blue Swirlcc5bea62012-04-14 14:56:48 +00001958 + memory_region_section_addr(section,
1959 addr));
Avi Kivityf3705d52012-03-08 16:16:34 +02001960 memcpy(buf, ptr, l);
bellard13eb76e2004-01-24 15:23:36 +00001961 }
1962 }
1963 len -= l;
1964 buf += l;
1965 addr += l;
1966 }
1967}
bellard8df1cd02005-01-28 22:37:22 +00001968
Avi Kivitya8170e52012-10-23 12:30:10 +02001969void address_space_write(AddressSpace *as, hwaddr addr,
Avi Kivityac1970f2012-10-03 16:22:53 +02001970 const uint8_t *buf, int len)
1971{
1972 address_space_rw(as, addr, (uint8_t *)buf, len, true);
1973}
1974
1975/**
1976 * address_space_read: read from an address space.
1977 *
1978 * @as: #AddressSpace to be accessed
1979 * @addr: address within that address space
1980 * @buf: buffer with the data transferred
1981 */
Avi Kivitya8170e52012-10-23 12:30:10 +02001982void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02001983{
1984 address_space_rw(as, addr, buf, len, false);
1985}
1986
1987
Avi Kivitya8170e52012-10-23 12:30:10 +02001988void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02001989 int len, int is_write)
1990{
1991 return address_space_rw(&address_space_memory, addr, buf, len, is_write);
1992}
1993
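/* Usage sketch (illustrative): device code usually goes through the
 * convenience wrappers layered on cpu_physical_memory_rw(); e.g.
 * fetching a guest descriptor ("my_desc" and "gpa" are made-up names):
 *
 *     struct my_desc desc;
 *     cpu_physical_memory_read(gpa, &desc, sizeof(desc));
 */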
bellardd0ecd2a2006-04-23 17:14:48 +00001994/* used for ROM loading : can write in RAM and ROM */
Avi Kivitya8170e52012-10-23 12:30:10 +02001995void cpu_physical_memory_write_rom(hwaddr addr,
bellardd0ecd2a2006-04-23 17:14:48 +00001996 const uint8_t *buf, int len)
1997{
Avi Kivityac1970f2012-10-03 16:22:53 +02001998 AddressSpaceDispatch *d = address_space_memory.dispatch;
bellardd0ecd2a2006-04-23 17:14:48 +00001999 int l;
2000 uint8_t *ptr;
Avi Kivitya8170e52012-10-23 12:30:10 +02002001 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02002002 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00002003
bellardd0ecd2a2006-04-23 17:14:48 +00002004 while (len > 0) {
2005 page = addr & TARGET_PAGE_MASK;
2006 l = (page + TARGET_PAGE_SIZE) - addr;
2007 if (l > len)
2008 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02002009 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002010
Blue Swirlcc5bea62012-04-14 14:56:48 +00002011 if (!(memory_region_is_ram(section->mr) ||
2012 memory_region_is_romd(section->mr))) {
bellardd0ecd2a2006-04-23 17:14:48 +00002013 /* do nothing */
2014 } else {
2015 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02002016 addr1 = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002017 + memory_region_section_addr(section, addr);
bellardd0ecd2a2006-04-23 17:14:48 +00002018 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002019 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002020 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002021 invalidate_and_set_dirty(addr1, l);
bellardd0ecd2a2006-04-23 17:14:48 +00002022 }
2023 len -= l;
2024 buf += l;
2025 addr += l;
2026 }
2027}
2028
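/* Usage sketch (illustrative): firmware loaders use this variant so the
 * copy also lands in ROM regions; the names below are made up:
 *
 *     cpu_physical_memory_write_rom(bios_base, bios_blob, bios_size);
 */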
aliguori6d16c2f2009-01-22 16:59:11 +00002029typedef struct {
2030 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002031 hwaddr addr;
2032 hwaddr len;
aliguori6d16c2f2009-01-22 16:59:11 +00002033} BounceBuffer;
2034
2035static BounceBuffer bounce;
2036
aliguoriba223c22009-01-22 16:59:16 +00002037typedef struct MapClient {
2038 void *opaque;
2039 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00002040 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002041} MapClient;
2042
Blue Swirl72cf2d42009-09-12 07:36:22 +00002043static QLIST_HEAD(map_client_list, MapClient) map_client_list
2044 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002045
2046void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2047{
Anthony Liguori7267c092011-08-20 22:09:37 -05002048 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00002049
2050 client->opaque = opaque;
2051 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002052 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00002053 return client;
2054}
2055
Blue Swirl8b9c99d2012-10-28 11:04:51 +00002056static void cpu_unregister_map_client(void *_client)
aliguoriba223c22009-01-22 16:59:16 +00002057{
2058 MapClient *client = (MapClient *)_client;
2059
Blue Swirl72cf2d42009-09-12 07:36:22 +00002060 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002061 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002062}
2063
2064static void cpu_notify_map_clients(void)
2065{
2066 MapClient *client;
2067
Blue Swirl72cf2d42009-09-12 07:36:22 +00002068 while (!QLIST_EMPTY(&map_client_list)) {
2069 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002070 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09002071 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00002072 }
2073}
2074
aliguori6d16c2f2009-01-22 16:59:11 +00002075/* Map a physical memory region into a host virtual address.
2076 * May map a subset of the requested range, given by and returned in *plen.
2077 * May return NULL if resources needed to perform the mapping are exhausted.
2078 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002079 * Use cpu_register_map_client() to know when retrying the map operation is
2080 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002081 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002082void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002083 hwaddr addr,
2084 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002085 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002086{
Avi Kivityac1970f2012-10-03 16:22:53 +02002087 AddressSpaceDispatch *d = as->dispatch;
Avi Kivitya8170e52012-10-23 12:30:10 +02002088 hwaddr len = *plen;
2089 hwaddr todo = 0;
aliguori6d16c2f2009-01-22 16:59:11 +00002090 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02002091 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02002092 MemoryRegionSection *section;
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002093 ram_addr_t raddr = RAM_ADDR_MAX;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002094 ram_addr_t rlen;
2095 void *ret;
aliguori6d16c2f2009-01-22 16:59:11 +00002096
2097 while (len > 0) {
2098 page = addr & TARGET_PAGE_MASK;
2099 l = (page + TARGET_PAGE_SIZE) - addr;
2100 if (l > len)
2101 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02002102 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
aliguori6d16c2f2009-01-22 16:59:11 +00002103
Avi Kivityf3705d52012-03-08 16:16:34 +02002104 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002105 if (todo || bounce.buffer) {
aliguori6d16c2f2009-01-22 16:59:11 +00002106 break;
2107 }
2108 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2109 bounce.addr = addr;
2110 bounce.len = l;
2111 if (!is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002112 address_space_read(as, addr, bounce.buffer, l);
aliguori6d16c2f2009-01-22 16:59:11 +00002113 }
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002114
2115 *plen = l;
2116 return bounce.buffer;
aliguori6d16c2f2009-01-22 16:59:11 +00002117 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002118 if (!todo) {
Avi Kivityf3705d52012-03-08 16:16:34 +02002119 raddr = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002120 + memory_region_section_addr(section, addr);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002121 }
aliguori6d16c2f2009-01-22 16:59:11 +00002122
2123 len -= l;
2124 addr += l;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002125 todo += l;
aliguori6d16c2f2009-01-22 16:59:11 +00002126 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002127 rlen = todo;
2128 ret = qemu_ram_ptr_length(raddr, &rlen);
2129 *plen = rlen;
2130 return ret;
aliguori6d16c2f2009-01-22 16:59:11 +00002131}
2132
Avi Kivityac1970f2012-10-03 16:22:53 +02002133/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002134 * Will also mark the memory as dirty if is_write == 1. access_len gives
2135 * the amount of memory that was actually read or written by the caller.
2136 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002137void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2138 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002139{
2140 if (buffer != bounce.buffer) {
2141 if (is_write) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03002142 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002143 while (access_len) {
2144 unsigned l;
2145 l = TARGET_PAGE_SIZE;
2146 if (l > access_len)
2147 l = access_len;
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002148 invalidate_and_set_dirty(addr1, l);
aliguori6d16c2f2009-01-22 16:59:11 +00002149 addr1 += l;
2150 access_len -= l;
2151 }
2152 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002153 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002154 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002155 }
aliguori6d16c2f2009-01-22 16:59:11 +00002156 return;
2157 }
2158 if (is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002159 address_space_write(as, bounce.addr, bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002160 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002161 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002162 bounce.buffer = NULL;
aliguoriba223c22009-01-22 16:59:16 +00002163 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002164}
bellardd0ecd2a2006-04-23 17:14:48 +00002165
Avi Kivitya8170e52012-10-23 12:30:10 +02002166void *cpu_physical_memory_map(hwaddr addr,
2167 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002168 int is_write)
2169{
2170 return address_space_map(&address_space_memory, addr, plen, is_write);
2171}
2172
Avi Kivitya8170e52012-10-23 12:30:10 +02002173void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2174 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002175{
2176 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2177}
2178
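/* Usage sketch (illustrative): the zero-copy DMA pattern built on the
 * two wrappers above; "retry_cb" and "opaque" are made-up names. Note
 * that the mapped length may come back smaller than requested:
 *
 *     hwaddr plen = len;
 *     void *host = cpu_physical_memory_map(gpa, &plen, is_write);
 *     if (!host) {
 *         cpu_register_map_client(opaque, retry_cb);
 *         return;
 *     }
 *     ...access up to plen bytes...
 *     cpu_physical_memory_unmap(host, plen, is_write, plen);
 */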
bellard8df1cd02005-01-28 22:37:22 +00002179/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002180static inline uint32_t ldl_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002181 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002182{
bellard8df1cd02005-01-28 22:37:22 +00002183 uint8_t *ptr;
2184 uint32_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02002185 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00002186
Avi Kivityac1970f2012-10-03 16:22:53 +02002187 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002188
Blue Swirlcc5bea62012-04-14 14:56:48 +00002189 if (!(memory_region_is_ram(section->mr) ||
2190 memory_region_is_romd(section->mr))) {
bellard8df1cd02005-01-28 22:37:22 +00002191 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00002192 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02002193 val = io_mem_read(section->mr, addr, 4);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002194#if defined(TARGET_WORDS_BIGENDIAN)
2195 if (endian == DEVICE_LITTLE_ENDIAN) {
2196 val = bswap32(val);
2197 }
2198#else
2199 if (endian == DEVICE_BIG_ENDIAN) {
2200 val = bswap32(val);
2201 }
2202#endif
bellard8df1cd02005-01-28 22:37:22 +00002203 } else {
2204 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02002205 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002206 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002207 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002208 switch (endian) {
2209 case DEVICE_LITTLE_ENDIAN:
2210 val = ldl_le_p(ptr);
2211 break;
2212 case DEVICE_BIG_ENDIAN:
2213 val = ldl_be_p(ptr);
2214 break;
2215 default:
2216 val = ldl_p(ptr);
2217 break;
2218 }
bellard8df1cd02005-01-28 22:37:22 +00002219 }
2220 return val;
2221}
2222
Avi Kivitya8170e52012-10-23 12:30:10 +02002223uint32_t ldl_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002224{
2225 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2226}
2227
Avi Kivitya8170e52012-10-23 12:30:10 +02002228uint32_t ldl_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002229{
2230 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2231}
2232
Avi Kivitya8170e52012-10-23 12:30:10 +02002233uint32_t ldl_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002234{
2235 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2236}
2237
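/* Usage sketch (illustrative): device models pick the accessor matching
 * the device's endianness rather than the guest CPU's, so e.g. a
 * little-endian descriptor field reads the same on every target
 * ("desc_gpa" is a made-up address):
 *
 *     uint32_t status = ldl_le_phys(desc_gpa + 4);
 */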
bellard84b7b8e2005-11-28 21:19:04 +00002238/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002239static inline uint64_t ldq_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002240 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00002241{
bellard84b7b8e2005-11-28 21:19:04 +00002242 uint8_t *ptr;
2243 uint64_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02002244 MemoryRegionSection *section;
bellard84b7b8e2005-11-28 21:19:04 +00002245
Avi Kivityac1970f2012-10-03 16:22:53 +02002246 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002247
Blue Swirlcc5bea62012-04-14 14:56:48 +00002248 if (!(memory_region_is_ram(section->mr) ||
2249 memory_region_is_romd(section->mr))) {
bellard84b7b8e2005-11-28 21:19:04 +00002250 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00002251 addr = memory_region_section_addr(section, addr);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002252
2253 /* XXX This is broken when device endian != cpu endian.
2254 Fix and add "endian" variable check */
bellard84b7b8e2005-11-28 21:19:04 +00002255#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivity37ec01d2012-03-08 18:08:35 +02002256 val = io_mem_read(section->mr, addr, 4) << 32;
2257 val |= io_mem_read(section->mr, addr + 4, 4);
bellard84b7b8e2005-11-28 21:19:04 +00002258#else
Avi Kivity37ec01d2012-03-08 18:08:35 +02002259 val = io_mem_read(section->mr, addr, 4);
2260 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
bellard84b7b8e2005-11-28 21:19:04 +00002261#endif
2262 } else {
2263 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02002264 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002265 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002266 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002267 switch (endian) {
2268 case DEVICE_LITTLE_ENDIAN:
2269 val = ldq_le_p(ptr);
2270 break;
2271 case DEVICE_BIG_ENDIAN:
2272 val = ldq_be_p(ptr);
2273 break;
2274 default:
2275 val = ldq_p(ptr);
2276 break;
2277 }
bellard84b7b8e2005-11-28 21:19:04 +00002278 }
2279 return val;
2280}
2281
Avi Kivitya8170e52012-10-23 12:30:10 +02002282uint64_t ldq_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002283{
2284 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2285}
2286
Avi Kivitya8170e52012-10-23 12:30:10 +02002287uint64_t ldq_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002288{
2289 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2290}
2291
Avi Kivitya8170e52012-10-23 12:30:10 +02002292uint64_t ldq_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002293{
2294 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2295}
2296
bellardaab33092005-10-30 20:48:42 +00002297/* XXX: optimize */
Avi Kivitya8170e52012-10-23 12:30:10 +02002298uint32_t ldub_phys(hwaddr addr)
bellardaab33092005-10-30 20:48:42 +00002299{
2300 uint8_t val;
2301 cpu_physical_memory_read(addr, &val, 1);
2302 return val;
2303}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}
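
/*
 * io_mem_read() returns data in the target's native byte order, so the
 * helper above only byte-swaps when the requested endianness differs
 * from it, e.g. for a little-endian load on a big-endian target.
 */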

uint32_t lduw_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
                               & TARGET_PAGE_MASK)
                              + memory_region_section_addr(section, addr);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
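
/*
 * A sketch of the intended use, with PG_DIRTY_MASK standing in for a
 * target-specific PTE bit: a software page-table walker that sets
 * accessed/dirty bits wants a store that leaves the dirty bitmap and
 * the translated-code cache alone:
 *
 *     pte |= PG_DIRTY_MASK;
 *     stl_phys_notdirty(pte_addr, pte);
 */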

/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
                + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}
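
/*
 * Two details of the store path above: a write that hits read-only RAM
 * is redirected to the phys_section_rom entry, so it behaves like a
 * store to real ROM instead of modifying the backing memory; and
 * invalidate_and_set_dirty() both invalidates translated code derived
 * from the page and marks it dirty for migration and display tracking.
 */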

void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
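
/*
 * As with the loads, device and board code normally uses the
 * explicit-endian store wrappers, e.g. (hypothetical register address):
 *
 *     stl_le_phys(doorbell_addr, 1);
 */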

/* XXX: optimize */
void stb_phys(hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
                + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
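
/*
 * Unlike the 16- and 32-bit stores, the 64-bit stores fix the byte order
 * up front (tswap64() converts between host and target order) and then
 * take the generic cpu_physical_memory_write() path rather than the
 * per-page dispatch above.
 */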

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        } else {
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
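
/*
 * This is the accessor the gdbstub uses: it walks the range page by
 * page, translating each guest-virtual page with
 * cpu_get_phys_page_debug(), and writes via
 * cpu_physical_memory_write_rom() so a debugger can plant breakpoints
 * even in ROM.  A sketch (env and pc are assumed to be in scope):
 *
 *     uint8_t insn[4];
 *     if (cpu_memory_rw_debug(env, pc, insn, sizeof(insn), 0) == 0) {
 *         ... insn now holds the bytes at guest-virtual pc ...
 *     }
 */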
#endif

#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find
 * out if it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif
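
/*
 * Legacy virtio uses the guest's native byte order on the wire, so the
 * virtio code needs to know whether the target is big endian; the helper
 * above just exposes the compile-time TARGET_WORDS_BIGENDIAN answer, and
 * the local prototype spares a shared header from carrying it.
 */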

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             phys_addr >> TARGET_PAGE_BITS);

    return !(memory_region_is_ram(section->mr) ||
             memory_region_is_romd(section->mr));
}
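
/*
 * True when the guest-physical page is backed by neither RAM nor ROM,
 * i.e. by an MMIO region.  Callers such as the guest-memory dump code
 * can use this to skip pages whose contents cannot simply be copied out:
 *
 *     if (!cpu_physical_memory_is_io(paddr)) {
 *         cpu_physical_memory_read(paddr, buf, TARGET_PAGE_SIZE);
 *     }
 */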
#endif