/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_UNASSIGNED
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;
DMAContext dma_context_memory;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *, cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

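/* Reserve room for at least 'nodes' more PhysPageEntry nodes, growing
   the pool geometrically so repeated reservations stay cheap.  */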
static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

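/* Take the next node from the pool and initialize all of its entries
   as empty non-leaf pointers.  */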
static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

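/* Fill one level of the phys page table: mark step-aligned runs of
   pages as leaves pointing at 'leaf', and recurse into the next level
   for partially covered entries.  */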
static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

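/* Map the page range [index, index + nb) to the given section index
   in the dispatch tree.  */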
static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

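/* Look up the section covering a physical page index; pages that were
   never mapped resolve to the unassigned section.  */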
MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#else
#define vmstate_cpu_common vmstate_dummy
#endif

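/* Return the CPU with the given index, or NULL if it does not exist.  */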
CPUState *qemu_get_cpu(int index)
{
    CPUArchState *env = first_cpu;
    CPUState *cpu = NULL;

    while (env) {
        cpu = ENV_GET_CPU(env);
        if (cpu->cpu_index == index) {
            break;
        }
        env = env->next_cpu;
    }

    return env ? cpu : NULL;
}

void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data)
{
    CPUArchState *env = first_cpu;

    while (env) {
        func(ENV_GET_CPU(env), data);
        env = env->next_cpu;
    }
}

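/* Add a newly created CPU to the tail of the global CPU list, assign
   it the next free index and register its vmstate/savevm handlers.  */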
void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

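/* Ask the CPU's execution loop to stop at the next opportunity.  */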
void cpu_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->exit_request = 1;
    cpu->tcg_exit_req = 1;
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

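/* Create a copy of a CPU state; note that the breakpoint and
   watchpoint lists are reinitialized rather than shared with the
   original.  */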
CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining. */
    new_env->next_cpu = next_cpu;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

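/* Compute the IOTLB entry for a TLB fill: RAM sections yield a ram
   address tagged as notdirty or ROM, other sections an index into
   phys_sections; pages with watchpoints are redirected to the watch
   section so accesses trap.  */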
hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, paddr);
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        iotlb = section - phys_sections;
        iotlb += memory_region_section_addr(section, paddr);
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(hwaddr base);

static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

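/* Recursively release one level of the page table, destroying any
   subpages referenced by leaf entries.  */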
static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(AddressSpaceDispatch *d)
{
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(phys_sections_nb < TARGET_PAGE_SIZE);

    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}

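/* Register a section smaller than a page: install (or reuse) a subpage
   container for the enclosing page and map the sub-range into it.  */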
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}

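/* Register a page-aligned section spanning one or more whole pages.  */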
static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    hwaddr addr;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    addr = start_addr;
    phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}

QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > MAX_PHYS_ADDR_SPACE_BITS)

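/* Clamp a section so that it does not extend beyond the addressable
   physical address space.  */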
static MemoryRegionSection limit(MemoryRegionSection section)
{
    section.size = MIN(section.offset_within_address_space + section.size,
                       MAX_PHYS_ADDR + 1)
                   - section.offset_within_address_space;

    return section;
}

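/* MemoryListener add callback: split the incoming section into an
   unaligned head, a run of whole pages and an unaligned tail, and
   register each part.  */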
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
    MemoryRegionSection now = limit(*section), remain = limit(*section);

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(d, &now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    while (remain.size >= TARGET_PAGE_SIZE) {
        now = remain;
        if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = TARGET_PAGE_SIZE;
            register_subpage(d, &now);
        } else {
            now.size &= TARGET_PAGE_MASK;
            register_multipage(d, &now);
        }
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(d, &now);
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

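/* Back a RAMBlock with an unlinked temporary file on a hugetlbfs
   mount; returns the mapped area, or NULL if hugepages cannot be
   used.  */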
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif

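/* Best-fit allocator for ram_addr_t space: return the smallest gap
   between existing blocks that still fits 'size'.  */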
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too. */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

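/* Allocate a new RAMBlock of 'size' bytes, backed by 'host' if given,
   otherwise by file, Xen, KVM or anonymous memory as configured, and
   return its ram_addr_t offset.  */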
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    /* This assumes the iothread lock is taken here too. */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_anon_ram_alloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_ram_alloc(size);
            } else {
                new_block->host = qemu_anon_ram_alloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block. */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}

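/* Unlink a block whose memory was supplied by the caller; the backing
   storage itself is not freed.  */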
void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too. */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too. */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_anon_ram_free(block->host, block->length);
                }
#else
                abort();
#endif
            } else {
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_anon_ram_free(block->host, block->length);
                }
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

#ifndef _WIN32
1191void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1192{
1193 RAMBlock *block;
1194 ram_addr_t offset;
1195 int flags;
1196 void *area, *vaddr;
1197
Paolo Bonzinia3161032012-11-14 15:54:48 +01001198 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001199 offset = addr - block->offset;
1200 if (offset < block->length) {
1201 vaddr = block->host + offset;
1202 if (block->flags & RAM_PREALLOC_MASK) {
1203 ;
1204 } else {
1205 flags = MAP_FIXED;
1206 munmap(vaddr, length);
1207 if (mem_path) {
1208#if defined(__linux__) && !defined(TARGET_S390X)
1209 if (block->fd) {
1210#ifdef MAP_POPULATE
1211 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1212 MAP_PRIVATE;
1213#else
1214 flags |= MAP_PRIVATE;
1215#endif
1216 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1217 flags, block->fd, offset);
1218 } else {
1219 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1220 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1221 flags, -1, 0);
1222 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01001223#else
1224 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001225#endif
1226 } else {
1227#if defined(TARGET_S390X) && defined(CONFIG_KVM)
1228 flags |= MAP_SHARED | MAP_ANONYMOUS;
1229 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
1230 flags, -1, 0);
1231#else
1232 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1233 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1234 flags, -1, 0);
1235#endif
1236 }
1237 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001238 fprintf(stderr, "Could not remap addr: "
1239 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001240 length, addr);
1241 exit(1);
1242 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001243 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001244 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001245 }
1246 return;
1247 }
1248 }
1249}
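/* Note (assumed context, not from the original file): qemu_ram_remap()
 * exists for hardware error recovery, e.g. KVM's hwpoison handling, where
 * the stale mapping of a poisoned guest page is torn down and replaced by
 * a fresh mapping at the same host virtual address. */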
1250#endif /* !_WIN32 */
1251
pbrookdc828ca2009-04-09 22:21:07 +00001252/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00001253 With the exception of the softmmu code in this file, this should
1254 only be used for local memory (e.g. video ram) that the device owns,
1255 and knows it isn't going to access beyond the end of the block.
1256
1257 It should not be used for general purpose DMA.
1258 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1259 */
Anthony Liguoric227f092009-10-01 16:12:16 -05001260void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00001261{
pbrook94a6b542009-04-11 17:15:54 +00001262 RAMBlock *block;
1263
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001264 /* The list is protected by the iothread lock here. */
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001265 block = ram_list.mru_block;
1266 if (block && addr - block->offset < block->length) {
1267 goto found;
1268 }
Paolo Bonzinia3161032012-11-14 15:54:48 +01001269 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Alex Williamsonf471a172010-06-11 11:11:42 -06001270 if (addr - block->offset < block->length) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001271 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001272 }
pbrook94a6b542009-04-11 17:15:54 +00001273 }
Alex Williamsonf471a172010-06-11 11:11:42 -06001274
1275 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1276 abort();
1277
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001278found:
1279 ram_list.mru_block = block;
1280 if (xen_enabled()) {
 1281        /* We need to check whether the requested address is in RAM
 1282         * because we don't want to map the entire guest memory in QEMU.
 1283         * In that case, just map up to the end of the page.
1284 */
1285 if (block->offset == 0) {
1286 return xen_map_cache(addr, 0, 0);
1287 } else if (block->host == NULL) {
1288 block->host =
1289 xen_map_cache(block->offset, block->length, 1);
1290 }
1291 }
1292 return block->host + (addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00001293}
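/* Illustrative sketch (not part of the original file; "mr" is a
 * hypothetical MemoryRegion): a device that allocated its RAM through
 * this file may touch it locally via the returned pointer,
 *
 *     ram_addr_t off = qemu_ram_alloc(4096, mr);
 *     uint8_t *p = qemu_get_ram_ptr(off);
 *     memset(p, 0, 4096);
 *
 * but, per the comment above, it must not treat that pointer as a
 * general-purpose DMA window. */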
1294
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001295/* Return a host pointer to ram allocated with qemu_ram_alloc. Same as
 1296 * qemu_get_ram_ptr, but does not touch ram_list.mru_block.
1297 *
1298 * ??? Is this still necessary?
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001299 */
Blue Swirl8b9c99d2012-10-28 11:04:51 +00001300static void *qemu_safe_ram_ptr(ram_addr_t addr)
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001301{
1302 RAMBlock *block;
1303
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001304 /* The list is protected by the iothread lock here. */
Paolo Bonzinia3161032012-11-14 15:54:48 +01001305 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001306 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02001307 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001308            /* We need to check whether the requested address is in RAM
 1309             * because we don't want to map the entire guest memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001310             * In that case, just map up to the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01001311 */
1312 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001313 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01001314 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001315 block->host =
1316 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01001317 }
1318 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001319 return block->host + (addr - block->offset);
1320 }
1321 }
1322
1323 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1324 abort();
1325
1326 return NULL;
1327}
1328
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001329/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1330 * but takes a size argument */
Blue Swirl8b9c99d2012-10-28 11:04:51 +00001331static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001332{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001333 if (*size == 0) {
1334 return NULL;
1335 }
Jan Kiszka868bb332011-06-21 22:59:09 +02001336 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001337 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02001338 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001339 RAMBlock *block;
1340
Paolo Bonzinia3161032012-11-14 15:54:48 +01001341 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001342 if (addr - block->offset < block->length) {
1343 if (addr - block->offset + *size > block->length)
1344 *size = block->length - addr + block->offset;
1345 return block->host + (addr - block->offset);
1346 }
1347 }
1348
1349 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1350 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001351 }
1352}
1353
Marcelo Tosattie8902612010-10-11 15:31:19 -03001354int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00001355{
pbrook94a6b542009-04-11 17:15:54 +00001356 RAMBlock *block;
1357 uint8_t *host = ptr;
1358
Jan Kiszka868bb332011-06-21 22:59:09 +02001359 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001360 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001361 return 0;
1362 }
1363
Paolo Bonzinia3161032012-11-14 15:54:48 +01001364 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001365        /* This case happens when the block is not mapped. */
1366 if (block->host == NULL) {
1367 continue;
1368 }
Alex Williamsonf471a172010-06-11 11:11:42 -06001369 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03001370 *ram_addr = block->offset + (host - block->host);
1371 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06001372 }
pbrook94a6b542009-04-11 17:15:54 +00001373 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001374
Marcelo Tosattie8902612010-10-11 15:31:19 -03001375 return -1;
1376}
Alex Williamsonf471a172010-06-11 11:11:42 -06001377
Marcelo Tosattie8902612010-10-11 15:31:19 -03001378/* Some of the softmmu routines need to translate from a host pointer
1379 (typically a TLB entry) back to a ram offset. */
1380ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
1381{
1382 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06001383
Marcelo Tosattie8902612010-10-11 15:31:19 -03001384 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
1385 fprintf(stderr, "Bad ram pointer %p\n", ptr);
1386 abort();
1387 }
1388 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00001389}
1390
Avi Kivitya8170e52012-10-23 12:30:10 +02001391static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001392 unsigned size)
bellard33417e72003-08-10 21:47:01 +00001393{
pbrook67d3b952006-12-18 05:03:52 +00001394#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00001395 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00001396#endif
Richard Henderson5b450402011-04-18 16:13:12 -07001397#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001398 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00001399#endif
1400 return 0;
1401}
1402
Avi Kivitya8170e52012-10-23 12:30:10 +02001403static void unassigned_mem_write(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001404 uint64_t val, unsigned size)
blueswir1e18231a2008-10-06 18:46:28 +00001405{
1406#ifdef DEBUG_UNASSIGNED
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001407 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
blueswir1e18231a2008-10-06 18:46:28 +00001408#endif
Richard Henderson5b450402011-04-18 16:13:12 -07001409#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001410 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00001411#endif
1412}
1413
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001414static const MemoryRegionOps unassigned_mem_ops = {
1415 .read = unassigned_mem_read,
1416 .write = unassigned_mem_write,
1417 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00001418};
1419
Avi Kivitya8170e52012-10-23 12:30:10 +02001420static uint64_t error_mem_read(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001421 unsigned size)
1422{
1423 abort();
1424}
1425
Avi Kivitya8170e52012-10-23 12:30:10 +02001426static void error_mem_write(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001427 uint64_t value, unsigned size)
1428{
1429 abort();
1430}
1431
1432static const MemoryRegionOps error_mem_ops = {
1433 .read = error_mem_read,
1434 .write = error_mem_write,
1435 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00001436};
1437
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001438static const MemoryRegionOps rom_mem_ops = {
1439 .read = error_mem_read,
1440 .write = unassigned_mem_write,
1441 .endianness = DEVICE_NATIVE_ENDIAN,
1442};
1443
Avi Kivitya8170e52012-10-23 12:30:10 +02001444static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001445 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001446{
bellard3a7d9292005-08-21 09:26:42 +00001447 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001448 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00001449 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1450#if !defined(CONFIG_USER_ONLY)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001451 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001452 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00001453#endif
1454 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001455 switch (size) {
1456 case 1:
1457 stb_p(qemu_get_ram_ptr(ram_addr), val);
1458 break;
1459 case 2:
1460 stw_p(qemu_get_ram_ptr(ram_addr), val);
1461 break;
1462 case 4:
1463 stl_p(qemu_get_ram_ptr(ram_addr), val);
1464 break;
1465 default:
1466 abort();
1467 }
bellardf23db162005-08-21 19:12:28 +00001468 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001469 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00001470 /* we remove the notdirty callback only if the code has been
1471 flushed */
1472 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00001473 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00001474}
1475
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001476static const MemoryRegionOps notdirty_mem_ops = {
1477 .read = error_mem_read,
1478 .write = notdirty_mem_write,
1479 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00001480};
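/* Round trip, as implied by the code above: a clean page is entered into
 * the TLB pointing at io_mem_notdirty; the first guest write lands in
 * notdirty_mem_write(), which invalidates any TBs on the page, performs
 * the store, sets the dirty bits, and finally calls tlb_set_dirty() so
 * that subsequent writes go straight to RAM. */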
1481
pbrook0f459d12008-06-09 00:20:13 +00001482/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00001483static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00001484{
Andreas Färber9349b4f2012-03-14 01:38:32 +01001485 CPUArchState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00001486 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00001487 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00001488 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00001489 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00001490
aliguori06d55cc2008-11-18 20:24:06 +00001491 if (env->watchpoint_hit) {
1492 /* We re-entered the check after replacing the TB. Now raise
 1493      * the debug interrupt so that it will trigger after the
1494 * current instruction. */
Andreas Färberc3affe52013-01-18 15:03:43 +01001495 cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00001496 return;
1497 }
pbrook2e70f6e2008-06-29 01:03:05 +00001498 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00001499 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001500 if ((vaddr == (wp->vaddr & len_mask) ||
1501 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00001502 wp->flags |= BP_WATCHPOINT_HIT;
1503 if (!env->watchpoint_hit) {
1504 env->watchpoint_hit = wp;
Blue Swirl5a316522012-12-02 21:28:09 +00001505 tb_check_watchpoint(env);
aliguori6e140f22008-11-18 20:37:55 +00001506 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1507 env->exception_index = EXCP_DEBUG;
Max Filippov488d6572012-01-29 02:24:39 +04001508 cpu_loop_exit(env);
aliguori6e140f22008-11-18 20:37:55 +00001509 } else {
1510 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1511 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
Max Filippov488d6572012-01-29 02:24:39 +04001512 cpu_resume_from_signal(env, NULL);
aliguori6e140f22008-11-18 20:37:55 +00001513 }
aliguori06d55cc2008-11-18 20:24:06 +00001514 }
aliguori6e140f22008-11-18 20:37:55 +00001515 } else {
1516 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00001517 }
1518 }
1519}
1520
pbrook6658ffb2007-03-16 23:58:11 +00001521/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1522 so these check for a hit then pass through to the normal out-of-line
1523 phys routines. */
Avi Kivitya8170e52012-10-23 12:30:10 +02001524static uint64_t watch_mem_read(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001525 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001526{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001527 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1528 switch (size) {
1529 case 1: return ldub_phys(addr);
1530 case 2: return lduw_phys(addr);
1531 case 4: return ldl_phys(addr);
1532 default: abort();
1533 }
pbrook6658ffb2007-03-16 23:58:11 +00001534}
1535
Avi Kivitya8170e52012-10-23 12:30:10 +02001536static void watch_mem_write(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001537 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001538{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001539 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1540 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04001541 case 1:
1542 stb_phys(addr, val);
1543 break;
1544 case 2:
1545 stw_phys(addr, val);
1546 break;
1547 case 4:
1548 stl_phys(addr, val);
1549 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02001550 default: abort();
1551 }
pbrook6658ffb2007-03-16 23:58:11 +00001552}
1553
Avi Kivity1ec9b902012-01-02 12:47:48 +02001554static const MemoryRegionOps watch_mem_ops = {
1555 .read = watch_mem_read,
1556 .write = watch_mem_write,
1557 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00001558};
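/* Illustrative only: the watchpoints checked above are installed through
 * the CPU watchpoint API from elsewhere (e.g. the gdbstub); "env" and the
 * flag choice here are assumptions:
 *
 *     CPUWatchpoint *wp;
 *     cpu_watchpoint_insert(env, vaddr, 4, BP_MEM_WRITE | BP_GDB, &wp);
 *
 * The TLB slow path then routes accesses to that page through
 * io_mem_watch, i.e. watch_mem_read()/watch_mem_write(). */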
pbrook6658ffb2007-03-16 23:58:11 +00001559
Avi Kivitya8170e52012-10-23 12:30:10 +02001560static uint64_t subpage_read(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001561 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001562{
Avi Kivity70c68e42012-01-02 12:32:48 +02001563 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07001564 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02001565 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00001566#if defined(DEBUG_SUBPAGE)
1567 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
1568 mmio, len, addr, idx);
1569#endif
blueswir1db7b5422007-05-26 17:36:03 +00001570
Avi Kivity5312bd82012-02-12 18:32:55 +02001571 section = &phys_sections[mmio->sub_section[idx]];
1572 addr += mmio->base;
1573 addr -= section->offset_within_address_space;
1574 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02001575 return io_mem_read(section->mr, addr, len);
blueswir1db7b5422007-05-26 17:36:03 +00001576}
1577
Avi Kivitya8170e52012-10-23 12:30:10 +02001578static void subpage_write(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001579 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001580{
Avi Kivity70c68e42012-01-02 12:32:48 +02001581 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07001582 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02001583 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00001584#if defined(DEBUG_SUBPAGE)
Avi Kivity70c68e42012-01-02 12:32:48 +02001585 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
1586 " idx %d value %"PRIx64"\n",
Richard Hendersonf6405242010-04-22 16:47:31 -07001587 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00001588#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07001589
Avi Kivity5312bd82012-02-12 18:32:55 +02001590 section = &phys_sections[mmio->sub_section[idx]];
1591 addr += mmio->base;
1592 addr -= section->offset_within_address_space;
1593 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02001594 io_mem_write(section->mr, addr, value, len);
blueswir1db7b5422007-05-26 17:36:03 +00001595}
1596
Avi Kivity70c68e42012-01-02 12:32:48 +02001597static const MemoryRegionOps subpage_ops = {
1598 .read = subpage_read,
1599 .write = subpage_write,
1600 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00001601};
1602
Avi Kivitya8170e52012-10-23 12:30:10 +02001603static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
Avi Kivityde712f92012-01-02 12:41:07 +02001604 unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01001605{
1606 ram_addr_t raddr = addr;
1607 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02001608 switch (size) {
1609 case 1: return ldub_p(ptr);
1610 case 2: return lduw_p(ptr);
1611 case 4: return ldl_p(ptr);
1612 default: abort();
1613 }
Andreas Färber56384e82011-11-30 16:26:21 +01001614}
1615
Avi Kivitya8170e52012-10-23 12:30:10 +02001616static void subpage_ram_write(void *opaque, hwaddr addr,
Avi Kivityde712f92012-01-02 12:41:07 +02001617 uint64_t value, unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01001618{
1619 ram_addr_t raddr = addr;
1620 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02001621 switch (size) {
1622 case 1: return stb_p(ptr, value);
1623 case 2: return stw_p(ptr, value);
1624 case 4: return stl_p(ptr, value);
1625 default: abort();
1626 }
Andreas Färber56384e82011-11-30 16:26:21 +01001627}
1628
Avi Kivityde712f92012-01-02 12:41:07 +02001629static const MemoryRegionOps subpage_ram_ops = {
1630 .read = subpage_ram_read,
1631 .write = subpage_ram_write,
1632 .endianness = DEVICE_NATIVE_ENDIAN,
Andreas Färber56384e82011-11-30 16:26:21 +01001633};
1634
Anthony Liguoric227f092009-10-01 16:12:16 -05001635static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02001636 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00001637{
1638 int idx, eidx;
1639
1640 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1641 return -1;
1642 idx = SUBPAGE_IDX(start);
1643 eidx = SUBPAGE_IDX(end);
1644#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00001645    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", __func__,
blueswir1db7b5422007-05-26 17:36:03 +00001646           mmio, start, end, idx, eidx, section);
1647#endif
Avi Kivity5312bd82012-02-12 18:32:55 +02001648 if (memory_region_is_ram(phys_sections[section].mr)) {
1649 MemoryRegionSection new_section = phys_sections[section];
1650 new_section.mr = &io_mem_subpage_ram;
1651 section = phys_section_add(&new_section);
Andreas Färber56384e82011-11-30 16:26:21 +01001652 }
blueswir1db7b5422007-05-26 17:36:03 +00001653 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02001654 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00001655 }
1656
1657 return 0;
1658}
1659
Avi Kivitya8170e52012-10-23 12:30:10 +02001660static subpage_t *subpage_init(hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00001661{
Anthony Liguoric227f092009-10-01 16:12:16 -05001662 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00001663
Anthony Liguori7267c092011-08-20 22:09:37 -05001664 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00001665
1666 mmio->base = base;
Avi Kivity70c68e42012-01-02 12:32:48 +02001667 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
1668 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02001669 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00001670#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00001671    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
 1672           mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00001673#endif
Avi Kivity0f0cb162012-02-13 17:14:32 +02001674 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
blueswir1db7b5422007-05-26 17:36:03 +00001675
1676 return mmio;
1677}
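/* A subpage is created whenever a memory region smaller than
 * TARGET_PAGE_SIZE (or not page-aligned) is mapped. Hypothetical sketch
 * ("s", "tiny_ops" and the address are made up for illustration):
 *
 *     memory_region_init_io(&s->iomem, &tiny_ops, s, "tiny-mmio", 64);
 *     memory_region_add_subregion(get_system_memory(), 0xfe000040,
 *                                 &s->iomem);
 *
 * Accesses to the containing page are then dispatched per-subregion
 * through subpage_read()/subpage_write() above. */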
1678
Avi Kivity5312bd82012-02-12 18:32:55 +02001679static uint16_t dummy_section(MemoryRegion *mr)
1680{
1681 MemoryRegionSection section = {
1682 .mr = mr,
1683 .offset_within_address_space = 0,
1684 .offset_within_region = 0,
1685 .size = UINT64_MAX,
1686 };
1687
1688 return phys_section_add(&section);
1689}
1690
Avi Kivitya8170e52012-10-23 12:30:10 +02001691MemoryRegion *iotlb_to_region(hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02001692{
Avi Kivity37ec01d2012-03-08 18:08:35 +02001693 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02001694}
1695
Avi Kivitye9179ce2009-06-14 11:38:52 +03001696static void io_mem_init(void)
1697{
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001698 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001699 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
1700 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
1701 "unassigned", UINT64_MAX);
1702 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
1703 "notdirty", UINT64_MAX);
Avi Kivityde712f92012-01-02 12:41:07 +02001704 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
1705 "subpage-ram", UINT64_MAX);
Avi Kivity1ec9b902012-01-02 12:47:48 +02001706 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
1707 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03001708}
1709
Avi Kivityac1970f2012-10-03 16:22:53 +02001710static void mem_begin(MemoryListener *listener)
1711{
1712 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
1713
1714 destroy_all_mappings(d);
1715 d->phys_map.ptr = PHYS_MAP_NODE_NIL;
1716}
1717
Avi Kivity50c1e142012-02-08 21:36:02 +02001718static void core_begin(MemoryListener *listener)
1719{
Avi Kivity5312bd82012-02-12 18:32:55 +02001720 phys_sections_clear();
1721 phys_section_unassigned = dummy_section(&io_mem_unassigned);
Avi Kivityaa102232012-03-08 17:06:55 +02001722 phys_section_notdirty = dummy_section(&io_mem_notdirty);
1723 phys_section_rom = dummy_section(&io_mem_rom);
1724 phys_section_watch = dummy_section(&io_mem_watch);
Avi Kivity50c1e142012-02-08 21:36:02 +02001725}
1726
Avi Kivity1d711482012-10-02 18:54:45 +02001727static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02001728{
Andreas Färber9349b4f2012-03-14 01:38:32 +01001729 CPUArchState *env;
Avi Kivity117712c2012-02-12 21:23:17 +02001730
1731 /* since each CPU stores ram addresses in its TLB cache, we must
1732 reset the modified entries */
1733 /* XXX: slow ! */
1734 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1735 tlb_flush(env, 1);
1736 }
Avi Kivity50c1e142012-02-08 21:36:02 +02001737}
1738
Avi Kivity93632742012-02-08 16:54:16 +02001739static void core_log_global_start(MemoryListener *listener)
1740{
1741 cpu_physical_memory_set_dirty_tracking(1);
1742}
1743
1744static void core_log_global_stop(MemoryListener *listener)
1745{
1746 cpu_physical_memory_set_dirty_tracking(0);
1747}
1748
Avi Kivity4855d412012-02-08 21:16:05 +02001749static void io_region_add(MemoryListener *listener,
1750 MemoryRegionSection *section)
1751{
Avi Kivitya2d33522012-03-05 17:40:12 +02001752 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
1753
1754 mrio->mr = section->mr;
1755 mrio->offset = section->offset_within_region;
1756 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
Avi Kivity4855d412012-02-08 21:16:05 +02001757 section->offset_within_address_space, section->size);
Avi Kivitya2d33522012-03-05 17:40:12 +02001758 ioport_register(&mrio->iorange);
Avi Kivity4855d412012-02-08 21:16:05 +02001759}
1760
1761static void io_region_del(MemoryListener *listener,
1762 MemoryRegionSection *section)
1763{
1764 isa_unassign_ioport(section->offset_within_address_space, section->size);
1765}
1766
Avi Kivity93632742012-02-08 16:54:16 +02001767static MemoryListener core_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02001768 .begin = core_begin,
Avi Kivity93632742012-02-08 16:54:16 +02001769 .log_global_start = core_log_global_start,
1770 .log_global_stop = core_log_global_stop,
Avi Kivityac1970f2012-10-03 16:22:53 +02001771 .priority = 1,
Avi Kivity93632742012-02-08 16:54:16 +02001772};
1773
Avi Kivity4855d412012-02-08 21:16:05 +02001774static MemoryListener io_memory_listener = {
1775 .region_add = io_region_add,
1776 .region_del = io_region_del,
Avi Kivity4855d412012-02-08 21:16:05 +02001777 .priority = 0,
1778};
1779
Avi Kivity1d711482012-10-02 18:54:45 +02001780static MemoryListener tcg_memory_listener = {
1781 .commit = tcg_commit,
1782};
1783
Avi Kivityac1970f2012-10-03 16:22:53 +02001784void address_space_init_dispatch(AddressSpace *as)
1785{
1786 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1787
1788 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1789 d->listener = (MemoryListener) {
1790 .begin = mem_begin,
1791 .region_add = mem_add,
1792 .region_nop = mem_add,
1793 .priority = 0,
1794 };
1795 as->dispatch = d;
1796 memory_listener_register(&d->listener, as);
1797}
1798
Avi Kivity83f3c252012-10-07 12:59:55 +02001799void address_space_destroy_dispatch(AddressSpace *as)
1800{
1801 AddressSpaceDispatch *d = as->dispatch;
1802
1803 memory_listener_unregister(&d->listener);
1804 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
1805 g_free(d);
1806 as->dispatch = NULL;
1807}
1808
Avi Kivity62152b82011-07-26 14:26:14 +03001809static void memory_map_init(void)
1810{
Anthony Liguori7267c092011-08-20 22:09:37 -05001811 system_memory = g_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03001812 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity2673a5d2012-10-02 18:49:28 +02001813 address_space_init(&address_space_memory, system_memory);
1814 address_space_memory.name = "memory";
Avi Kivity309cb472011-08-08 16:09:03 +03001815
Anthony Liguori7267c092011-08-20 22:09:37 -05001816 system_io = g_malloc(sizeof(*system_io));
Avi Kivity309cb472011-08-08 16:09:03 +03001817 memory_region_init(system_io, "io", 65536);
Avi Kivity2673a5d2012-10-02 18:49:28 +02001818 address_space_init(&address_space_io, system_io);
1819 address_space_io.name = "I/O";
Avi Kivity93632742012-02-08 16:54:16 +02001820
Avi Kivityf6790af2012-10-02 20:13:51 +02001821 memory_listener_register(&core_memory_listener, &address_space_memory);
1822 memory_listener_register(&io_memory_listener, &address_space_io);
1823 memory_listener_register(&tcg_memory_listener, &address_space_memory);
Peter Maydell9e119082012-10-29 11:34:32 +10001824
1825 dma_context_init(&dma_context_memory, &address_space_memory,
1826 NULL, NULL, NULL);
Avi Kivity62152b82011-07-26 14:26:14 +03001827}
1828
1829MemoryRegion *get_system_memory(void)
1830{
1831 return system_memory;
1832}
1833
Avi Kivity309cb472011-08-08 16:09:03 +03001834MemoryRegion *get_system_io(void)
1835{
1836 return system_io;
1837}
1838
pbrooke2eef172008-06-08 01:09:01 +00001839#endif /* !defined(CONFIG_USER_ONLY) */
1840
bellard13eb76e2004-01-24 15:23:36 +00001841/* physical memory access (slow version, mainly for debug) */
1842#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01001843int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00001844 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00001845{
1846 int l, flags;
1847 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00001848 void * p;
bellard13eb76e2004-01-24 15:23:36 +00001849
1850 while (len > 0) {
1851 page = addr & TARGET_PAGE_MASK;
1852 l = (page + TARGET_PAGE_SIZE) - addr;
1853 if (l > len)
1854 l = len;
1855 flags = page_get_flags(page);
1856 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00001857 return -1;
bellard13eb76e2004-01-24 15:23:36 +00001858 if (is_write) {
1859 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00001860 return -1;
bellard579a97f2007-11-11 14:26:47 +00001861 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001862 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00001863 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001864 memcpy(p, buf, l);
1865 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00001866 } else {
1867 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00001868 return -1;
bellard579a97f2007-11-11 14:26:47 +00001869 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001870 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00001871 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001872 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00001873 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00001874 }
1875 len -= l;
1876 buf += l;
1877 addr += l;
1878 }
Paul Brooka68fe892010-03-01 00:08:59 +00001879 return 0;
bellard13eb76e2004-01-24 15:23:36 +00001880}
bellard8df1cd02005-01-28 22:37:22 +00001881
bellard13eb76e2004-01-24 15:23:36 +00001882#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001883
Avi Kivitya8170e52012-10-23 12:30:10 +02001884static void invalidate_and_set_dirty(hwaddr addr,
1885 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001886{
1887 if (!cpu_physical_memory_is_dirty(addr)) {
1888 /* invalidate code */
1889 tb_invalidate_phys_page_range(addr, addr + length, 0);
1890 /* set dirty bit */
1891 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1892 }
Anthony PERARDe2269392012-10-03 13:49:22 +00001893 xen_modified_memory(addr, length);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001894}
1895
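/* Core copy loop: transfers are first split at TARGET_PAGE_SIZE
 * boundaries; within a page, MMIO is further split into the widest
 * naturally aligned 4/2/1 byte accesses, while RAM is copied directly,
 * with dirty tracking and TB invalidation on the write side. */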
Avi Kivitya8170e52012-10-23 12:30:10 +02001896void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02001897 int len, bool is_write)
bellard13eb76e2004-01-24 15:23:36 +00001898{
Avi Kivityac1970f2012-10-03 16:22:53 +02001899 AddressSpaceDispatch *d = as->dispatch;
Avi Kivity37ec01d2012-03-08 18:08:35 +02001900 int l;
bellard13eb76e2004-01-24 15:23:36 +00001901 uint8_t *ptr;
1902 uint32_t val;
Avi Kivitya8170e52012-10-23 12:30:10 +02001903 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02001904 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00001905
bellard13eb76e2004-01-24 15:23:36 +00001906 while (len > 0) {
1907 page = addr & TARGET_PAGE_MASK;
1908 l = (page + TARGET_PAGE_SIZE) - addr;
1909 if (l > len)
1910 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02001911 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00001912
bellard13eb76e2004-01-24 15:23:36 +00001913 if (is_write) {
Avi Kivityf3705d52012-03-08 16:16:34 +02001914 if (!memory_region_is_ram(section->mr)) {
Avi Kivitya8170e52012-10-23 12:30:10 +02001915 hwaddr addr1;
Blue Swirlcc5bea62012-04-14 14:56:48 +00001916 addr1 = memory_region_section_addr(section, addr);
bellard6a00d602005-11-21 23:25:50 +00001917 /* XXX: could force cpu_single_env to NULL to avoid
1918 potential bugs */
aurel326c2934d2009-02-18 21:37:17 +00001919 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00001920 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001921 val = ldl_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02001922 io_mem_write(section->mr, addr1, val, 4);
bellard13eb76e2004-01-24 15:23:36 +00001923 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00001924 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00001925 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001926 val = lduw_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02001927 io_mem_write(section->mr, addr1, val, 2);
bellard13eb76e2004-01-24 15:23:36 +00001928 l = 2;
1929 } else {
bellard1c213d12005-09-03 10:49:04 +00001930 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001931 val = ldub_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02001932 io_mem_write(section->mr, addr1, val, 1);
bellard13eb76e2004-01-24 15:23:36 +00001933 l = 1;
1934 }
Avi Kivityf3705d52012-03-08 16:16:34 +02001935 } else if (!section->readonly) {
Anthony PERARD8ca56922011-07-15 04:32:53 +00001936 ram_addr_t addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02001937 addr1 = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00001938 + memory_region_section_addr(section, addr);
bellard13eb76e2004-01-24 15:23:36 +00001939 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00001940 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00001941 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001942 invalidate_and_set_dirty(addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00001943 }
1944 } else {
Blue Swirlcc5bea62012-04-14 14:56:48 +00001945 if (!(memory_region_is_ram(section->mr) ||
1946 memory_region_is_romd(section->mr))) {
Avi Kivitya8170e52012-10-23 12:30:10 +02001947 hwaddr addr1;
bellard13eb76e2004-01-24 15:23:36 +00001948 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00001949 addr1 = memory_region_section_addr(section, addr);
aurel326c2934d2009-02-18 21:37:17 +00001950 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00001951 /* 32 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02001952 val = io_mem_read(section->mr, addr1, 4);
bellardc27004e2005-01-03 23:35:10 +00001953 stl_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00001954 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00001955 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00001956 /* 16 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02001957 val = io_mem_read(section->mr, addr1, 2);
bellardc27004e2005-01-03 23:35:10 +00001958 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00001959 l = 2;
1960 } else {
bellard1c213d12005-09-03 10:49:04 +00001961 /* 8 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02001962 val = io_mem_read(section->mr, addr1, 1);
bellardc27004e2005-01-03 23:35:10 +00001963 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00001964 l = 1;
1965 }
1966 } else {
1967 /* RAM case */
Anthony PERARD0a1b3572012-03-19 15:54:34 +00001968 ptr = qemu_get_ram_ptr(section->mr->ram_addr
Blue Swirlcc5bea62012-04-14 14:56:48 +00001969 + memory_region_section_addr(section,
1970 addr));
Avi Kivityf3705d52012-03-08 16:16:34 +02001971 memcpy(buf, ptr, l);
bellard13eb76e2004-01-24 15:23:36 +00001972 }
1973 }
1974 len -= l;
1975 buf += l;
1976 addr += l;
1977 }
1978}
bellard8df1cd02005-01-28 22:37:22 +00001979
Avi Kivitya8170e52012-10-23 12:30:10 +02001980void address_space_write(AddressSpace *as, hwaddr addr,
Avi Kivityac1970f2012-10-03 16:22:53 +02001981 const uint8_t *buf, int len)
1982{
1983 address_space_rw(as, addr, (uint8_t *)buf, len, true);
1984}
1985
1986/**
1987 * address_space_read: read from an address space.
1988 *
1989 * @as: #AddressSpace to be accessed
1990 * @addr: address within that address space
 1991 * @buf: buffer with the data transferred
 * @len: number of bytes to read
 1992 */
Avi Kivitya8170e52012-10-23 12:30:10 +02001993void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02001994{
1995 address_space_rw(as, addr, buf, len, false);
1996}
1997
1998
Avi Kivitya8170e52012-10-23 12:30:10 +02001999void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002000 int len, int is_write)
2001{
2002 return address_space_rw(&address_space_memory, addr, buf, len, is_write);
2003}
2004
bellardd0ecd2a2006-04-23 17:14:48 +00002005/* used for ROM loading : can write in RAM and ROM */
Avi Kivitya8170e52012-10-23 12:30:10 +02002006void cpu_physical_memory_write_rom(hwaddr addr,
bellardd0ecd2a2006-04-23 17:14:48 +00002007 const uint8_t *buf, int len)
2008{
Avi Kivityac1970f2012-10-03 16:22:53 +02002009 AddressSpaceDispatch *d = address_space_memory.dispatch;
bellardd0ecd2a2006-04-23 17:14:48 +00002010 int l;
2011 uint8_t *ptr;
Avi Kivitya8170e52012-10-23 12:30:10 +02002012 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02002013 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00002014
bellardd0ecd2a2006-04-23 17:14:48 +00002015 while (len > 0) {
2016 page = addr & TARGET_PAGE_MASK;
2017 l = (page + TARGET_PAGE_SIZE) - addr;
2018 if (l > len)
2019 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02002020 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002021
Blue Swirlcc5bea62012-04-14 14:56:48 +00002022 if (!(memory_region_is_ram(section->mr) ||
2023 memory_region_is_romd(section->mr))) {
bellardd0ecd2a2006-04-23 17:14:48 +00002024 /* do nothing */
2025 } else {
2026 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02002027 addr1 = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002028 + memory_region_section_addr(section, addr);
bellardd0ecd2a2006-04-23 17:14:48 +00002029 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002030 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002031 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002032 invalidate_and_set_dirty(addr1, l);
bellardd0ecd2a2006-04-23 17:14:48 +00002033 }
2034 len -= l;
2035 buf += l;
2036 addr += l;
2037 }
2038}
2039
aliguori6d16c2f2009-01-22 16:59:11 +00002040typedef struct {
2041 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002042 hwaddr addr;
2043 hwaddr len;
aliguori6d16c2f2009-01-22 16:59:11 +00002044} BounceBuffer;
2045
2046static BounceBuffer bounce;
2047
aliguoriba223c22009-01-22 16:59:16 +00002048typedef struct MapClient {
2049 void *opaque;
2050 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00002051 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002052} MapClient;
2053
Blue Swirl72cf2d42009-09-12 07:36:22 +00002054static QLIST_HEAD(map_client_list, MapClient) map_client_list
2055 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002056
2057void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2058{
Anthony Liguori7267c092011-08-20 22:09:37 -05002059 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00002060
2061 client->opaque = opaque;
2062 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002063 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00002064 return client;
2065}
2066
Blue Swirl8b9c99d2012-10-28 11:04:51 +00002067static void cpu_unregister_map_client(void *_client)
aliguoriba223c22009-01-22 16:59:16 +00002068{
2069 MapClient *client = (MapClient *)_client;
2070
Blue Swirl72cf2d42009-09-12 07:36:22 +00002071 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002072 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002073}
2074
2075static void cpu_notify_map_clients(void)
2076{
2077 MapClient *client;
2078
Blue Swirl72cf2d42009-09-12 07:36:22 +00002079 while (!QLIST_EMPTY(&map_client_list)) {
2080 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002081 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09002082 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00002083 }
2084}
2085
aliguori6d16c2f2009-01-22 16:59:11 +00002086/* Map a physical memory region into a host virtual address.
2087 * May map a subset of the requested range, given by and returned in *plen.
2088 * May return NULL if resources needed to perform the mapping are exhausted.
2089 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002090 * Use cpu_register_map_client() to know when retrying the map operation is
2091 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002092 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002093void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002094 hwaddr addr,
2095 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002096 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002097{
Avi Kivityac1970f2012-10-03 16:22:53 +02002098 AddressSpaceDispatch *d = as->dispatch;
Avi Kivitya8170e52012-10-23 12:30:10 +02002099 hwaddr len = *plen;
2100 hwaddr todo = 0;
aliguori6d16c2f2009-01-22 16:59:11 +00002101 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02002102 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02002103 MemoryRegionSection *section;
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002104 ram_addr_t raddr = RAM_ADDR_MAX;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002105 ram_addr_t rlen;
2106 void *ret;
aliguori6d16c2f2009-01-22 16:59:11 +00002107
2108 while (len > 0) {
2109 page = addr & TARGET_PAGE_MASK;
2110 l = (page + TARGET_PAGE_SIZE) - addr;
2111 if (l > len)
2112 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02002113 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
aliguori6d16c2f2009-01-22 16:59:11 +00002114
Avi Kivityf3705d52012-03-08 16:16:34 +02002115 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002116 if (todo || bounce.buffer) {
aliguori6d16c2f2009-01-22 16:59:11 +00002117 break;
2118 }
2119 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2120 bounce.addr = addr;
2121 bounce.len = l;
2122 if (!is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002123 address_space_read(as, addr, bounce.buffer, l);
aliguori6d16c2f2009-01-22 16:59:11 +00002124 }
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002125
2126 *plen = l;
2127 return bounce.buffer;
aliguori6d16c2f2009-01-22 16:59:11 +00002128 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002129 if (!todo) {
Avi Kivityf3705d52012-03-08 16:16:34 +02002130 raddr = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002131 + memory_region_section_addr(section, addr);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002132 }
aliguori6d16c2f2009-01-22 16:59:11 +00002133
2134 len -= l;
2135 addr += l;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002136 todo += l;
aliguori6d16c2f2009-01-22 16:59:11 +00002137 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002138 rlen = todo;
2139 ret = qemu_ram_ptr_length(raddr, &rlen);
2140 *plen = rlen;
2141 return ret;
aliguori6d16c2f2009-01-22 16:59:11 +00002142}
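/* Usage sketch (illustrative; "as", "gpa", "data" and "size" are
 * hypothetical): the canonical pattern is map, access, then unmap with
 * the length actually transferred,
 *
 *     hwaddr len = size;
 *     void *p = address_space_map(as, gpa, &len, true);
 *     if (p) {
 *         memcpy(p, data, len);
 *         address_space_unmap(as, p, len, true, len);
 *     }
 *
 * falling back to cpu_register_map_client() when NULL is returned (the
 * single bounce buffer is busy) to learn when a retry is likely to
 * succeed. */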
2143
Avi Kivityac1970f2012-10-03 16:22:53 +02002144/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002145 * Will also mark the memory as dirty if is_write == 1. access_len gives
2146 * the amount of memory that was actually read or written by the caller.
2147 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002148void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2149 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002150{
2151 if (buffer != bounce.buffer) {
2152 if (is_write) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03002153 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002154 while (access_len) {
2155 unsigned l;
2156 l = TARGET_PAGE_SIZE;
2157 if (l > access_len)
2158 l = access_len;
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002159 invalidate_and_set_dirty(addr1, l);
aliguori6d16c2f2009-01-22 16:59:11 +00002160 addr1 += l;
2161 access_len -= l;
2162 }
2163 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002164 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002165 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002166 }
aliguori6d16c2f2009-01-22 16:59:11 +00002167 return;
2168 }
2169 if (is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002170 address_space_write(as, bounce.addr, bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002171 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002172 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002173 bounce.buffer = NULL;
aliguoriba223c22009-01-22 16:59:16 +00002174 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002175}
bellardd0ecd2a2006-04-23 17:14:48 +00002176
Avi Kivitya8170e52012-10-23 12:30:10 +02002177void *cpu_physical_memory_map(hwaddr addr,
2178 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002179 int is_write)
2180{
2181 return address_space_map(&address_space_memory, addr, plen, is_write);
2182}
2183
Avi Kivitya8170e52012-10-23 12:30:10 +02002184void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2185 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002186{
2187 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2188}
2189
bellard8df1cd02005-01-28 22:37:22 +00002190/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002191static inline uint32_t ldl_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002192 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002193{
bellard8df1cd02005-01-28 22:37:22 +00002194 uint8_t *ptr;
2195 uint32_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02002196 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00002197
Avi Kivityac1970f2012-10-03 16:22:53 +02002198 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002199
Blue Swirlcc5bea62012-04-14 14:56:48 +00002200 if (!(memory_region_is_ram(section->mr) ||
2201 memory_region_is_romd(section->mr))) {
bellard8df1cd02005-01-28 22:37:22 +00002202 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00002203 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02002204 val = io_mem_read(section->mr, addr, 4);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002205#if defined(TARGET_WORDS_BIGENDIAN)
2206 if (endian == DEVICE_LITTLE_ENDIAN) {
2207 val = bswap32(val);
2208 }
2209#else
2210 if (endian == DEVICE_BIG_ENDIAN) {
2211 val = bswap32(val);
2212 }
2213#endif
bellard8df1cd02005-01-28 22:37:22 +00002214 } else {
2215 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02002216 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002217 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002218 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002219 switch (endian) {
2220 case DEVICE_LITTLE_ENDIAN:
2221 val = ldl_le_p(ptr);
2222 break;
2223 case DEVICE_BIG_ENDIAN:
2224 val = ldl_be_p(ptr);
2225 break;
2226 default:
2227 val = ldl_p(ptr);
2228 break;
2229 }
bellard8df1cd02005-01-28 22:37:22 +00002230 }
2231 return val;
2232}
2233
Avi Kivitya8170e52012-10-23 12:30:10 +02002234uint32_t ldl_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002235{
2236 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2237}
2238
Avi Kivitya8170e52012-10-23 12:30:10 +02002239uint32_t ldl_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002240{
2241 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2242}
2243
Avi Kivitya8170e52012-10-23 12:30:10 +02002244uint32_t ldl_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002245{
2246 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2247}
2248
bellard84b7b8e2005-11-28 21:19:04 +00002249/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002250static inline uint64_t ldq_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002251 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00002252{
bellard84b7b8e2005-11-28 21:19:04 +00002253 uint8_t *ptr;
2254 uint64_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02002255 MemoryRegionSection *section;
bellard84b7b8e2005-11-28 21:19:04 +00002256
Avi Kivityac1970f2012-10-03 16:22:53 +02002257 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002258
Blue Swirlcc5bea62012-04-14 14:56:48 +00002259 if (!(memory_region_is_ram(section->mr) ||
2260 memory_region_is_romd(section->mr))) {
bellard84b7b8e2005-11-28 21:19:04 +00002261 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00002262 addr = memory_region_section_addr(section, addr);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002263
2264 /* XXX This is broken when device endian != cpu endian.
2265 Fix and add "endian" variable check */
bellard84b7b8e2005-11-28 21:19:04 +00002266#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivity37ec01d2012-03-08 18:08:35 +02002267 val = io_mem_read(section->mr, addr, 4) << 32;
2268 val |= io_mem_read(section->mr, addr + 4, 4);
bellard84b7b8e2005-11-28 21:19:04 +00002269#else
Avi Kivity37ec01d2012-03-08 18:08:35 +02002270 val = io_mem_read(section->mr, addr, 4);
2271 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
bellard84b7b8e2005-11-28 21:19:04 +00002272#endif
2273 } else {
2274 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02002275 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002276 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002277 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002278 switch (endian) {
2279 case DEVICE_LITTLE_ENDIAN:
2280 val = ldq_le_p(ptr);
2281 break;
2282 case DEVICE_BIG_ENDIAN:
2283 val = ldq_be_p(ptr);
2284 break;
2285 default:
2286 val = ldq_p(ptr);
2287 break;
2288 }
bellard84b7b8e2005-11-28 21:19:04 +00002289 }
2290 return val;
2291}
2292
Avi Kivitya8170e52012-10-23 12:30:10 +02002293uint64_t ldq_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002294{
2295 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2296}
2297
Avi Kivitya8170e52012-10-23 12:30:10 +02002298uint64_t ldq_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002299{
2300 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2301}
2302
Avi Kivitya8170e52012-10-23 12:30:10 +02002303uint64_t ldq_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002304{
2305 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2306}

/* XXX: optimize */
uint32_t ldub_phys(hwaddr addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
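
/*
 * Sketch only (not in the original source): reading a 16-bit field that a
 * device specification fixes as big-endian, e.g. a network-byte-order
 * field.  "reg_addr" and the function name are invented; lduw_be_phys()
 * performs any swap internally, mirroring the bswap16() logic above.
 */
static inline uint16_t example_read_be16_field(hwaddr reg_addr)
{
    return lduw_be_phys(reg_addr);
}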

/* warning: addr must be aligned.  The RAM page is not marked as dirty and
   the code inside it is not invalidated.  This is useful when the dirty
   bits are being used to track modified PTEs. */
void stl_phys_notdirty(hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
                               & TARGET_PAGE_MASK)
                              + memory_region_section_addr(section, addr);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
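
/*
 * Hypothetical usage sketch (not from the original file): a target MMU
 * helper updating an accessed bit in a guest page table entry.  Using
 * stl_phys_notdirty() keeps the PTE page's dirty bits meaningful for
 * tracking guest-initiated writes only.  "pte_addr", "pte" and the bit
 * mask are assumptions made up for this illustration.
 */
static inline void example_set_pte_accessed(hwaddr pte_addr, uint32_t pte)
{
    stl_phys_notdirty(pte_addr, pte | 0x20);    /* e.g. an ACCESSED bit */
}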

/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
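
/*
 * Sketch, not part of the original source: posting a 32-bit status word
 * that a hypothetical guest driver expects in little-endian layout.  Note
 * that invalidate_and_set_dirty() is handled inside stl_phys_internal(),
 * so the caller never touches dirty bits itself.
 */
static inline void example_post_status_le(hwaddr status_addr, uint32_t status)
{
    stl_le_phys(status_addr, status);
}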

/* XXX: optimize */
void stb_phys(hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
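
/*
 * Illustration only: stq_phys() stores in *target* byte order (note the
 * tswap64() above), so a value written with it reads back unchanged via
 * ldq_phys() when the address is RAM-backed.  The explicit _le/_be
 * variants are preferable when the layout in guest memory is fixed by a
 * device spec rather than by the target.  "scratch_addr" is assumed to be
 * a valid RAM address for this sketch.
 */
static inline bool example_stq_roundtrip(hwaddr scratch_addr, uint64_t v)
{
    stq_phys(scratch_addr, v);
    return ldq_phys(scratch_addr) == v;
}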

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        } else {
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
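
/*
 * Hedged example, not in the original file: the kind of call a debugger
 * stub makes to peek at guest virtual memory.  A non-zero result means
 * some page in the range had no physical mapping; the bytes are copied
 * verbatim, with no byte-swapping.  "env" is assumed to be a valid
 * CPUArchState for the current target.
 */
static inline bool example_debug_peek32(CPUArchState *env, target_ulong va,
                                        uint32_t *out)
{
    return cpu_memory_rw_debug(env, va, (uint8_t *)out, 4, 0) == 0;
}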
#endif

#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find
 * out if it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
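
/*
 * Sketch under stated assumptions: a caller converting a 16-bit virtio
 * field between guest and host order could combine the helper above with
 * the host endianness, as below.  HOST_WORDS_BIGENDIAN is QEMU's usual
 * host-endian define; the function name is invented for this example.
 */
static inline uint16_t example_virtio_tswap16(uint16_t v)
{
#ifdef HOST_WORDS_BIGENDIAN
    return virtio_is_big_endian() ? v : bswap16(v);
#else
    return virtio_is_big_endian() ? bswap16(v) : v;
#endif
}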

#endif

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             phys_addr >> TARGET_PAGE_BITS);

    return !(memory_region_is_ram(section->mr) ||
             memory_region_is_romd(section->mr));
}
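
/*
 * Illustrative only: a dump- or migration-style caller can use the
 * predicate above to skip device-backed pages and copy only real memory.
 * The copy_page() callback named here is hypothetical.
 */
static inline void example_copy_ram_only(hwaddr start, hwaddr end,
                                         void (*copy_page)(hwaddr))
{
    hwaddr a;

    for (a = start; a < end; a += TARGET_PAGE_SIZE) {
        if (!cpu_physical_memory_is_io(a)) {
            copy_page(a);
        }
    }
}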
#endif