/*
 *  Virtual page mapping
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_UNASSIGNED
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;
DMAContext dma_context_memory;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *, cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

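/*
 * The physical page map is a multi-level table: interior nodes hold
 * L2_SIZE PhysPageEntry slots and leaves hold phys_sections[] indices.
 * Nodes come from the simple bump allocator below; PHYS_MAP_NODE_NIL
 * marks a slot whose subtree has not been populated yet.
 */
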
static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

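/* Fill [*index, *index + *nb) pages of the subtree at @lp with @leaf.
   The step at each level is L2_SIZE^level pages: a range aligned to and
   covering a full step becomes a single leaf entry, smaller ranges
   recurse one level down.  *index and *nb are advanced as pages are
   consumed. */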
static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

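/* Look up the section covering page @index; unmapped pages resolve to
   &phys_sections[phys_section_unassigned].  Illustrative pairing with
   phys_page_set(), as used by register_multipage() below:

       phys_page_set(d, addr >> TARGET_PAGE_BITS, 1, section_index);
       section = phys_page_find(d, addr >> TARGET_PAGE_BITS);
 */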
MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#else
#define vmstate_cpu_common vmstate_dummy
#endif

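/* Return the CPUState with the given cpu_index, or NULL if there is
   none; this is a linear walk of the global CPU list. */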
CPUState *qemu_get_cpu(int index)
{
    CPUArchState *env = first_cpu;
    CPUState *cpu = NULL;

    while (env) {
        cpu = ENV_GET_CPU(env);
        if (cpu->cpu_index == index) {
            break;
        }
        env = env->next_cpu;
    }

    return env ? cpu : NULL;
}

void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data)
{
    CPUArchState *env = first_cpu;

    while (env) {
        func(ENV_GET_CPU(env), data);
        env = env->next_cpu;
    }
}

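/* Register a newly created CPU: append it to the global list, assign
   the next free cpu_index, initialize its breakpoint/watchpoint queues
   and hook up its vmstate/savevm handlers. */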
void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
                            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
        len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
    }

    tlb_flush_page(env, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(env, wp);
        }
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
    }

    breakpoint_invalidate(env, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(env, bp);
        }
    }
#endif
}

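/* Illustrative use of the breakpoint API (a sketch, not code from this
   file) by a debugger front end such as the gdbstub:

       CPUBreakpoint *bp;
       if (cpu_breakpoint_insert(env, pc, BP_GDB, &bp) == 0) {
           ...single-step or resume the CPU...
           cpu_breakpoint_remove_by_ref(env, bp);
       }
 */
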
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(env, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

void cpu_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->exit_request = 1;
    cpu->tcg_exit_req = 1;
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining. */
    new_env->next_cpu = next_cpu;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0) {
        return;
    }
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

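/* Compute the iotlb value for a TLB entry covering @paddr.  For RAM the
   value is the page's ram_addr ORed with a dirty-tracking section index
   (notdirty or rom); for MMIO it is the section index plus the offset
   within the section.  Pages with watchpoints are redirected to the
   watchpoint trap section instead. */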
hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, paddr);
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        iotlb = section - phys_sections;
        iotlb += memory_region_section_addr(section, paddr);
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* !defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

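/*
 * Subpage support: when a MemoryRegionSection is not page-aligned, the
 * covering page is backed by a subpage_t whose sub_section[] array maps
 * every byte offset within the page to a phys_sections[] index.
 */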
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(hwaddr base);

static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(AddressSpaceDispatch *d)
{
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}

static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    hwaddr addr;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    addr = start_addr;
    phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}

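/* MemoryListener callback that maps a new section: the page-aligned
   middle goes through register_multipage(), unaligned head and tail
   pieces through register_subpage(). */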
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
    MemoryRegionSection now = *section, remain = *section;

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(d, &now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    while (remain.size >= TARGET_PAGE_SIZE) {
        now = remain;
        if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = TARGET_PAGE_SIZE;
            register_subpage(d, &now);
        } else {
            now.size &= TARGET_PAGE_MASK;
            register_multipage(d, &now);
        }
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(d, &now);
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC) {
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
    }

    return fs.f_bsize;
}

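/* Back a RAMBlock with a hugetlbfs file: create and immediately unlink
   a temporary file under @path, round @memory up to the huge page size,
   and mmap the file.  Returns NULL on failure so callers can fall back
   to anonymous memory. */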
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/') {
            *c = '_';
        }
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory)) {
        perror("ftruncate");
    }

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif

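/* Pick a ram_addr_t offset for a new block of @size bytes: scan the
   gaps between existing RAMBlocks and take the smallest gap that is
   still large enough (best fit). */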
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks)) {
        return 0;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        last = MAX(last, block->offset + block->length);
    }

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too. */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

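/* Allocate a new RAMBlock of @size bytes and return its ram_addr_t
   offset.  Backing storage is, in order of preference: the caller's
   @host pointer, a hugetlbfs file when -mem-path was given, the
   Xen/KVM-specific allocators, or plain anonymous memory. */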
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    /* This assumes the iothread lock is taken here too. */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_anon_ram_alloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_ram_alloc(size);
            } else {
                new_block->host = qemu_anon_ram_alloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);

    if (kvm_enabled()) {
        kvm_setup_guest_memory(new_block->host, size);
    }

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too. */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too. */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_anon_ram_free(block->host, block->length);
                }
#else
                abort();
#endif
            } else {
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_anon_ram_free(block->host, block->length);
                }
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

#ifndef _WIN32
1174void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1175{
1176 RAMBlock *block;
1177 ram_addr_t offset;
1178 int flags;
1179 void *area, *vaddr;
1180
Paolo Bonzinia3161032012-11-14 15:54:48 +01001181 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001182 offset = addr - block->offset;
1183 if (offset < block->length) {
1184 vaddr = block->host + offset;
1185 if (block->flags & RAM_PREALLOC_MASK) {
1186 ;
1187 } else {
1188 flags = MAP_FIXED;
1189 munmap(vaddr, length);
1190 if (mem_path) {
1191#if defined(__linux__) && !defined(TARGET_S390X)
1192 if (block->fd) {
1193#ifdef MAP_POPULATE
1194 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1195 MAP_PRIVATE;
1196#else
1197 flags |= MAP_PRIVATE;
1198#endif
1199 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1200 flags, block->fd, offset);
1201 } else {
1202 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1203 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1204 flags, -1, 0);
1205 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01001206#else
1207 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001208#endif
1209 } else {
1210#if defined(TARGET_S390X) && defined(CONFIG_KVM)
1211 flags |= MAP_SHARED | MAP_ANONYMOUS;
1212 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
1213 flags, -1, 0);
1214#else
1215 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1216 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1217 flags, -1, 0);
1218#endif
1219 }
1220 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001221 fprintf(stderr, "Could not remap addr: "
1222 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001223 length, addr);
1224 exit(1);
1225 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001226 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001227 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001228 }
1229 return;
1230 }
1231 }
1232}
1233#endif /* !_WIN32 */
1234
pbrookdc828ca2009-04-09 22:21:07 +00001235/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00001236 With the exception of the softmmu code in this file, this should
1237 only be used for local memory (e.g. video ram) that the device owns,
1238 and knows it isn't going to access beyond the end of the block.
1239
1240 It should not be used for general purpose DMA.
1241 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1242 */
Anthony Liguoric227f092009-10-01 16:12:16 -05001243void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00001244{
pbrook94a6b542009-04-11 17:15:54 +00001245 RAMBlock *block;
1246
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001247 /* The list is protected by the iothread lock here. */
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001248 block = ram_list.mru_block;
1249 if (block && addr - block->offset < block->length) {
1250 goto found;
1251 }
Paolo Bonzinia3161032012-11-14 15:54:48 +01001252 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Alex Williamsonf471a172010-06-11 11:11:42 -06001253 if (addr - block->offset < block->length) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001254 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001255 }
pbrook94a6b542009-04-11 17:15:54 +00001256 }
Alex Williamsonf471a172010-06-11 11:11:42 -06001257
1258 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1259 abort();
1260
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001261found:
1262 ram_list.mru_block = block;
1263 if (xen_enabled()) {
 1264 /* We need to check whether the requested address is in RAM
 1265 * because we don't want to map all of guest memory into QEMU.
 1266 * In that case, just map up to the end of the page.
1267 */
1268 if (block->offset == 0) {
1269 return xen_map_cache(addr, 0, 0);
1270 } else if (block->host == NULL) {
1271 block->host =
1272 xen_map_cache(block->offset, block->length, 1);
1273 }
1274 }
1275 return block->host + (addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00001276}
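
/* Usage sketch (illustrative; the names vram_offset and vram_len are
 * hypothetical): a device that owns a RAM block may touch it directly,
 *
 *     void *host = qemu_get_ram_ptr(vram_offset);
 *     memset(host, 0, vram_len);
 *
 * with vram_offset obtained from an earlier qemu_ram_alloc(). Anything
 * resembling guest-driven DMA should go through cpu_physical_memory_rw()
 * or the map/unmap API instead, as noted above.
 */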
1277
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001278/* Return a host pointer to ram allocated with qemu_ram_alloc. Same as
 1279 * qemu_get_ram_ptr but does not touch ram_list.mru_block.
1280 *
1281 * ??? Is this still necessary?
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001282 */
Blue Swirl8b9c99d2012-10-28 11:04:51 +00001283static void *qemu_safe_ram_ptr(ram_addr_t addr)
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001284{
1285 RAMBlock *block;
1286
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001287 /* The list is protected by the iothread lock here. */
Paolo Bonzinia3161032012-11-14 15:54:48 +01001288 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001289 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02001290 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001291 /* We need to check whether the requested address is in RAM
 1292 * because we don't want to map all of guest memory into QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001293 * In that case, just map up to the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01001294 */
1295 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001296 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01001297 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001298 block->host =
1299 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01001300 }
1301 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001302 return block->host + (addr - block->offset);
1303 }
1304 }
1305
1306 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1307 abort();
1308
1309 return NULL;
1310}
1311
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001312/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1313 * but takes a size argument */
Blue Swirl8b9c99d2012-10-28 11:04:51 +00001314static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001315{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001316 if (*size == 0) {
1317 return NULL;
1318 }
Jan Kiszka868bb332011-06-21 22:59:09 +02001319 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001320 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02001321 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001322 RAMBlock *block;
1323
Paolo Bonzinia3161032012-11-14 15:54:48 +01001324 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001325 if (addr - block->offset < block->length) {
1326 if (addr - block->offset + *size > block->length)
1327 *size = block->length - addr + block->offset;
1328 return block->host + (addr - block->offset);
1329 }
1330 }
1331
1332 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1333 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001334 }
1335}
1336
Marcelo Tosattie8902612010-10-11 15:31:19 -03001337int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00001338{
pbrook94a6b542009-04-11 17:15:54 +00001339 RAMBlock *block;
1340 uint8_t *host = ptr;
1341
Jan Kiszka868bb332011-06-21 22:59:09 +02001342 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001343 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001344 return 0;
1345 }
1346
Paolo Bonzinia3161032012-11-14 15:54:48 +01001347 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001348 /* This case happens when the block is not mapped. */
1349 if (block->host == NULL) {
1350 continue;
1351 }
Alex Williamsonf471a172010-06-11 11:11:42 -06001352 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03001353 *ram_addr = block->offset + (host - block->host);
1354 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06001355 }
pbrook94a6b542009-04-11 17:15:54 +00001356 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001357
Marcelo Tosattie8902612010-10-11 15:31:19 -03001358 return -1;
1359}
Alex Williamsonf471a172010-06-11 11:11:42 -06001360
Marcelo Tosattie8902612010-10-11 15:31:19 -03001361/* Some of the softmmu routines need to translate from a host pointer
1362 (typically a TLB entry) back to a ram offset. */
1363ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
1364{
1365 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06001366
Marcelo Tosattie8902612010-10-11 15:31:19 -03001367 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
1368 fprintf(stderr, "Bad ram pointer %p\n", ptr);
1369 abort();
1370 }
1371 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00001372}
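
/* Sketch of the round trip (assumes offset is a valid ram_addr_t returned
 * by qemu_ram_alloc()):
 *
 *     void *host = qemu_get_ram_ptr(offset);
 *     assert(qemu_ram_addr_from_host_nofail(host) == offset);
 *
 * A pointer that is not backed by any RAMBlock aborts instead.
 */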
1373
Avi Kivitya8170e52012-10-23 12:30:10 +02001374static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001375 unsigned size)
bellard33417e72003-08-10 21:47:01 +00001376{
pbrook67d3b952006-12-18 05:03:52 +00001377#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00001378 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00001379#endif
Richard Henderson5b450402011-04-18 16:13:12 -07001380#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001381 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00001382#endif
1383 return 0;
1384}
1385
Avi Kivitya8170e52012-10-23 12:30:10 +02001386static void unassigned_mem_write(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001387 uint64_t val, unsigned size)
blueswir1e18231a2008-10-06 18:46:28 +00001388{
1389#ifdef DEBUG_UNASSIGNED
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001390 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
blueswir1e18231a2008-10-06 18:46:28 +00001391#endif
Richard Henderson5b450402011-04-18 16:13:12 -07001392#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001393 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00001394#endif
1395}
1396
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001397static const MemoryRegionOps unassigned_mem_ops = {
1398 .read = unassigned_mem_read,
1399 .write = unassigned_mem_write,
1400 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00001401};
1402
Avi Kivitya8170e52012-10-23 12:30:10 +02001403static uint64_t error_mem_read(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001404 unsigned size)
1405{
1406 abort();
1407}
1408
Avi Kivitya8170e52012-10-23 12:30:10 +02001409static void error_mem_write(void *opaque, hwaddr addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001410 uint64_t value, unsigned size)
1411{
1412 abort();
1413}
1414
1415static const MemoryRegionOps error_mem_ops = {
1416 .read = error_mem_read,
1417 .write = error_mem_write,
1418 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00001419};
1420
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001421static const MemoryRegionOps rom_mem_ops = {
1422 .read = error_mem_read,
1423 .write = unassigned_mem_write,
1424 .endianness = DEVICE_NATIVE_ENDIAN,
1425};
1426
Avi Kivitya8170e52012-10-23 12:30:10 +02001427static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001428 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001429{
bellard3a7d9292005-08-21 09:26:42 +00001430 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001431 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00001432 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1433#if !defined(CONFIG_USER_ONLY)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001434 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001435 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00001436#endif
1437 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001438 switch (size) {
1439 case 1:
1440 stb_p(qemu_get_ram_ptr(ram_addr), val);
1441 break;
1442 case 2:
1443 stw_p(qemu_get_ram_ptr(ram_addr), val);
1444 break;
1445 case 4:
1446 stl_p(qemu_get_ram_ptr(ram_addr), val);
1447 break;
1448 default:
1449 abort();
1450 }
bellardf23db162005-08-21 19:12:28 +00001451 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001452 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00001453 /* we remove the notdirty callback only if the code has been
1454 flushed */
1455 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00001456 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00001457}
1458
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001459static const MemoryRegionOps notdirty_mem_ops = {
1460 .read = error_mem_read,
1461 .write = notdirty_mem_write,
1462 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00001463};
1464
pbrook0f459d12008-06-09 00:20:13 +00001465/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00001466static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00001467{
Andreas Färber9349b4f2012-03-14 01:38:32 +01001468 CPUArchState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00001469 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00001470 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00001471 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00001472 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00001473
aliguori06d55cc2008-11-18 20:24:06 +00001474 if (env->watchpoint_hit) {
1475 /* We re-entered the check after replacing the TB. Now raise
 1476 * the debug interrupt so that it will trigger after the
1477 * current instruction. */
Andreas Färberc3affe52013-01-18 15:03:43 +01001478 cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00001479 return;
1480 }
pbrook2e70f6e2008-06-29 01:03:05 +00001481 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00001482 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001483 if ((vaddr == (wp->vaddr & len_mask) ||
1484 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00001485 wp->flags |= BP_WATCHPOINT_HIT;
1486 if (!env->watchpoint_hit) {
1487 env->watchpoint_hit = wp;
Blue Swirl5a316522012-12-02 21:28:09 +00001488 tb_check_watchpoint(env);
aliguori6e140f22008-11-18 20:37:55 +00001489 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1490 env->exception_index = EXCP_DEBUG;
Max Filippov488d6572012-01-29 02:24:39 +04001491 cpu_loop_exit(env);
aliguori6e140f22008-11-18 20:37:55 +00001492 } else {
1493 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1494 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
Max Filippov488d6572012-01-29 02:24:39 +04001495 cpu_resume_from_signal(env, NULL);
aliguori6e140f22008-11-18 20:37:55 +00001496 }
aliguori06d55cc2008-11-18 20:24:06 +00001497 }
aliguori6e140f22008-11-18 20:37:55 +00001498 } else {
1499 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00001500 }
1501 }
1502}
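
/* For context (a hedged sketch, not code from this path): the watchpoints
 * scanned above are installed by callers such as the gdbstub, roughly
 *
 *     CPUWatchpoint *wp;
 *     cpu_watchpoint_insert(env, vaddr, 4, BP_MEM_WRITE | BP_GDB, &wp);
 *
 * after which accesses to the watched page are steered through the
 * io_mem_watch region defined below.
 */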
1503
pbrook6658ffb2007-03-16 23:58:11 +00001504/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1505 so these check for a hit then pass through to the normal out-of-line
1506 phys routines. */
Avi Kivitya8170e52012-10-23 12:30:10 +02001507static uint64_t watch_mem_read(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001508 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001509{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001510 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1511 switch (size) {
1512 case 1: return ldub_phys(addr);
1513 case 2: return lduw_phys(addr);
1514 case 4: return ldl_phys(addr);
1515 default: abort();
1516 }
pbrook6658ffb2007-03-16 23:58:11 +00001517}
1518
Avi Kivitya8170e52012-10-23 12:30:10 +02001519static void watch_mem_write(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001520 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001521{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001522 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1523 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04001524 case 1:
1525 stb_phys(addr, val);
1526 break;
1527 case 2:
1528 stw_phys(addr, val);
1529 break;
1530 case 4:
1531 stl_phys(addr, val);
1532 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02001533 default: abort();
1534 }
pbrook6658ffb2007-03-16 23:58:11 +00001535}
1536
Avi Kivity1ec9b902012-01-02 12:47:48 +02001537static const MemoryRegionOps watch_mem_ops = {
1538 .read = watch_mem_read,
1539 .write = watch_mem_write,
1540 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00001541};
pbrook6658ffb2007-03-16 23:58:11 +00001542
Avi Kivitya8170e52012-10-23 12:30:10 +02001543static uint64_t subpage_read(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001544 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001545{
Avi Kivity70c68e42012-01-02 12:32:48 +02001546 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07001547 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02001548 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00001549#if defined(DEBUG_SUBPAGE)
 1550 printf("%s: subpage %p len %u addr " TARGET_FMT_plx " idx %u\n", __func__,
1551 mmio, len, addr, idx);
1552#endif
blueswir1db7b5422007-05-26 17:36:03 +00001553
Avi Kivity5312bd82012-02-12 18:32:55 +02001554 section = &phys_sections[mmio->sub_section[idx]];
1555 addr += mmio->base;
1556 addr -= section->offset_within_address_space;
1557 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02001558 return io_mem_read(section->mr, addr, len);
blueswir1db7b5422007-05-26 17:36:03 +00001559}
1560
Avi Kivitya8170e52012-10-23 12:30:10 +02001561static void subpage_write(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001562 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001563{
Avi Kivity70c68e42012-01-02 12:32:48 +02001564 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07001565 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02001566 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00001567#if defined(DEBUG_SUBPAGE)
Avi Kivity70c68e42012-01-02 12:32:48 +02001568 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
 1569 " idx %u value %"PRIx64"\n",
Richard Hendersonf6405242010-04-22 16:47:31 -07001570 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00001571#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07001572
Avi Kivity5312bd82012-02-12 18:32:55 +02001573 section = &phys_sections[mmio->sub_section[idx]];
1574 addr += mmio->base;
1575 addr -= section->offset_within_address_space;
1576 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02001577 io_mem_write(section->mr, addr, value, len);
blueswir1db7b5422007-05-26 17:36:03 +00001578}
1579
Avi Kivity70c68e42012-01-02 12:32:48 +02001580static const MemoryRegionOps subpage_ops = {
1581 .read = subpage_read,
1582 .write = subpage_write,
1583 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00001584};
1585
Avi Kivitya8170e52012-10-23 12:30:10 +02001586static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
Avi Kivityde712f92012-01-02 12:41:07 +02001587 unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01001588{
1589 ram_addr_t raddr = addr;
1590 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02001591 switch (size) {
1592 case 1: return ldub_p(ptr);
1593 case 2: return lduw_p(ptr);
1594 case 4: return ldl_p(ptr);
1595 default: abort();
1596 }
Andreas Färber56384e82011-11-30 16:26:21 +01001597}
1598
Avi Kivitya8170e52012-10-23 12:30:10 +02001599static void subpage_ram_write(void *opaque, hwaddr addr,
Avi Kivityde712f92012-01-02 12:41:07 +02001600 uint64_t value, unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01001601{
1602 ram_addr_t raddr = addr;
1603 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02001604 switch (size) {
1605 case 1: return stb_p(ptr, value);
1606 case 2: return stw_p(ptr, value);
1607 case 4: return stl_p(ptr, value);
1608 default: abort();
1609 }
Andreas Färber56384e82011-11-30 16:26:21 +01001610}
1611
Avi Kivityde712f92012-01-02 12:41:07 +02001612static const MemoryRegionOps subpage_ram_ops = {
1613 .read = subpage_ram_read,
1614 .write = subpage_ram_write,
1615 .endianness = DEVICE_NATIVE_ENDIAN,
Andreas Färber56384e82011-11-30 16:26:21 +01001616};
1617
Anthony Liguoric227f092009-10-01 16:12:16 -05001618static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02001619 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00001620{
1621 int idx, eidx;
1622
1623 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1624 return -1;
1625 idx = SUBPAGE_IDX(start);
1626 eidx = SUBPAGE_IDX(end);
1627#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00001628 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", __func__,
blueswir1db7b5422007-05-26 17:36:03 +00001629 mmio, start, end, idx, eidx, section);
1630#endif
Avi Kivity5312bd82012-02-12 18:32:55 +02001631 if (memory_region_is_ram(phys_sections[section].mr)) {
1632 MemoryRegionSection new_section = phys_sections[section];
1633 new_section.mr = &io_mem_subpage_ram;
1634 section = phys_section_add(&new_section);
Andreas Färber56384e82011-11-30 16:26:21 +01001635 }
blueswir1db7b5422007-05-26 17:36:03 +00001636 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02001637 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00001638 }
1639
1640 return 0;
1641}
1642
Avi Kivitya8170e52012-10-23 12:30:10 +02001643static subpage_t *subpage_init(hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00001644{
Anthony Liguoric227f092009-10-01 16:12:16 -05001645 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00001646
Anthony Liguori7267c092011-08-20 22:09:37 -05001647 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00001648
1649 mmio->base = base;
Avi Kivity70c68e42012-01-02 12:32:48 +02001650 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
1651 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02001652 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00001653#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00001654 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
 1655 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00001656#endif
Avi Kivity0f0cb162012-02-13 17:14:32 +02001657 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
blueswir1db7b5422007-05-26 17:36:03 +00001658
1659 return mmio;
1660}
1661
Avi Kivity5312bd82012-02-12 18:32:55 +02001662static uint16_t dummy_section(MemoryRegion *mr)
1663{
1664 MemoryRegionSection section = {
1665 .mr = mr,
1666 .offset_within_address_space = 0,
1667 .offset_within_region = 0,
1668 .size = UINT64_MAX,
1669 };
1670
1671 return phys_section_add(&section);
1672}
1673
Avi Kivitya8170e52012-10-23 12:30:10 +02001674MemoryRegion *iotlb_to_region(hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02001675{
Avi Kivity37ec01d2012-03-08 18:08:35 +02001676 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02001677}
1678
Avi Kivitye9179ce2009-06-14 11:38:52 +03001679static void io_mem_init(void)
1680{
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001681 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001682 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
1683 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
1684 "unassigned", UINT64_MAX);
1685 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
1686 "notdirty", UINT64_MAX);
Avi Kivityde712f92012-01-02 12:41:07 +02001687 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
1688 "subpage-ram", UINT64_MAX);
Avi Kivity1ec9b902012-01-02 12:47:48 +02001689 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
1690 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03001691}
1692
Avi Kivityac1970f2012-10-03 16:22:53 +02001693static void mem_begin(MemoryListener *listener)
1694{
1695 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
1696
1697 destroy_all_mappings(d);
1698 d->phys_map.ptr = PHYS_MAP_NODE_NIL;
1699}
1700
Avi Kivity50c1e142012-02-08 21:36:02 +02001701static void core_begin(MemoryListener *listener)
1702{
Avi Kivity5312bd82012-02-12 18:32:55 +02001703 phys_sections_clear();
1704 phys_section_unassigned = dummy_section(&io_mem_unassigned);
Avi Kivityaa102232012-03-08 17:06:55 +02001705 phys_section_notdirty = dummy_section(&io_mem_notdirty);
1706 phys_section_rom = dummy_section(&io_mem_rom);
1707 phys_section_watch = dummy_section(&io_mem_watch);
Avi Kivity50c1e142012-02-08 21:36:02 +02001708}
1709
Avi Kivity1d711482012-10-02 18:54:45 +02001710static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02001711{
Andreas Färber9349b4f2012-03-14 01:38:32 +01001712 CPUArchState *env;
Avi Kivity117712c2012-02-12 21:23:17 +02001713
1714 /* since each CPU stores ram addresses in its TLB cache, we must
1715 reset the modified entries */
1716 /* XXX: slow ! */
1717 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1718 tlb_flush(env, 1);
1719 }
Avi Kivity50c1e142012-02-08 21:36:02 +02001720}
1721
Avi Kivity93632742012-02-08 16:54:16 +02001722static void core_log_global_start(MemoryListener *listener)
1723{
1724 cpu_physical_memory_set_dirty_tracking(1);
1725}
1726
1727static void core_log_global_stop(MemoryListener *listener)
1728{
1729 cpu_physical_memory_set_dirty_tracking(0);
1730}
1731
Avi Kivity4855d412012-02-08 21:16:05 +02001732static void io_region_add(MemoryListener *listener,
1733 MemoryRegionSection *section)
1734{
Avi Kivitya2d33522012-03-05 17:40:12 +02001735 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
1736
1737 mrio->mr = section->mr;
1738 mrio->offset = section->offset_within_region;
1739 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
Avi Kivity4855d412012-02-08 21:16:05 +02001740 section->offset_within_address_space, section->size);
Avi Kivitya2d33522012-03-05 17:40:12 +02001741 ioport_register(&mrio->iorange);
Avi Kivity4855d412012-02-08 21:16:05 +02001742}
1743
1744static void io_region_del(MemoryListener *listener,
1745 MemoryRegionSection *section)
1746{
1747 isa_unassign_ioport(section->offset_within_address_space, section->size);
1748}
1749
Avi Kivity93632742012-02-08 16:54:16 +02001750static MemoryListener core_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02001751 .begin = core_begin,
Avi Kivity93632742012-02-08 16:54:16 +02001752 .log_global_start = core_log_global_start,
1753 .log_global_stop = core_log_global_stop,
Avi Kivityac1970f2012-10-03 16:22:53 +02001754 .priority = 1,
Avi Kivity93632742012-02-08 16:54:16 +02001755};
1756
Avi Kivity4855d412012-02-08 21:16:05 +02001757static MemoryListener io_memory_listener = {
1758 .region_add = io_region_add,
1759 .region_del = io_region_del,
Avi Kivity4855d412012-02-08 21:16:05 +02001760 .priority = 0,
1761};
1762
Avi Kivity1d711482012-10-02 18:54:45 +02001763static MemoryListener tcg_memory_listener = {
1764 .commit = tcg_commit,
1765};
1766
Avi Kivityac1970f2012-10-03 16:22:53 +02001767void address_space_init_dispatch(AddressSpace *as)
1768{
1769 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1770
1771 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1772 d->listener = (MemoryListener) {
1773 .begin = mem_begin,
1774 .region_add = mem_add,
1775 .region_nop = mem_add,
1776 .priority = 0,
1777 };
1778 as->dispatch = d;
1779 memory_listener_register(&d->listener, as);
1780}
1781
Avi Kivity83f3c252012-10-07 12:59:55 +02001782void address_space_destroy_dispatch(AddressSpace *as)
1783{
1784 AddressSpaceDispatch *d = as->dispatch;
1785
1786 memory_listener_unregister(&d->listener);
1787 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
1788 g_free(d);
1789 as->dispatch = NULL;
1790}
1791
Avi Kivity62152b82011-07-26 14:26:14 +03001792static void memory_map_init(void)
1793{
Anthony Liguori7267c092011-08-20 22:09:37 -05001794 system_memory = g_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03001795 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity2673a5d2012-10-02 18:49:28 +02001796 address_space_init(&address_space_memory, system_memory);
1797 address_space_memory.name = "memory";
Avi Kivity309cb472011-08-08 16:09:03 +03001798
Anthony Liguori7267c092011-08-20 22:09:37 -05001799 system_io = g_malloc(sizeof(*system_io));
Avi Kivity309cb472011-08-08 16:09:03 +03001800 memory_region_init(system_io, "io", 65536);
Avi Kivity2673a5d2012-10-02 18:49:28 +02001801 address_space_init(&address_space_io, system_io);
1802 address_space_io.name = "I/O";
Avi Kivity93632742012-02-08 16:54:16 +02001803
Avi Kivityf6790af2012-10-02 20:13:51 +02001804 memory_listener_register(&core_memory_listener, &address_space_memory);
1805 memory_listener_register(&io_memory_listener, &address_space_io);
1806 memory_listener_register(&tcg_memory_listener, &address_space_memory);
Peter Maydell9e119082012-10-29 11:34:32 +10001807
1808 dma_context_init(&dma_context_memory, &address_space_memory,
1809 NULL, NULL, NULL);
Avi Kivity62152b82011-07-26 14:26:14 +03001810}
1811
1812MemoryRegion *get_system_memory(void)
1813{
1814 return system_memory;
1815}
1816
Avi Kivity309cb472011-08-08 16:09:03 +03001817MemoryRegion *get_system_io(void)
1818{
1819 return system_io;
1820}
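
/* Example (a sketch; dev->iomem and the offset are hypothetical): boards
 * consume these accessors to build the guest memory tree,
 *
 *     MemoryRegion *sysmem = get_system_memory();
 *     memory_region_add_subregion(sysmem, 0x10000000, &dev->iomem);
 *
 * after which accesses are dispatched through the listeners registered
 * in memory_map_init().
 */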
1821
pbrooke2eef172008-06-08 01:09:01 +00001822#endif /* !defined(CONFIG_USER_ONLY) */
1823
bellard13eb76e2004-01-24 15:23:36 +00001824/* physical memory access (slow version, mainly for debug) */
1825#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01001826int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00001827 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00001828{
1829 int l, flags;
1830 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00001831 void * p;
bellard13eb76e2004-01-24 15:23:36 +00001832
1833 while (len > 0) {
1834 page = addr & TARGET_PAGE_MASK;
1835 l = (page + TARGET_PAGE_SIZE) - addr;
1836 if (l > len)
1837 l = len;
1838 flags = page_get_flags(page);
1839 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00001840 return -1;
bellard13eb76e2004-01-24 15:23:36 +00001841 if (is_write) {
1842 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00001843 return -1;
bellard579a97f2007-11-11 14:26:47 +00001844 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001845 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00001846 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001847 memcpy(p, buf, l);
1848 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00001849 } else {
1850 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00001851 return -1;
bellard579a97f2007-11-11 14:26:47 +00001852 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001853 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00001854 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001855 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00001856 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00001857 }
1858 len -= l;
1859 buf += l;
1860 addr += l;
1861 }
Paul Brooka68fe892010-03-01 00:08:59 +00001862 return 0;
bellard13eb76e2004-01-24 15:23:36 +00001863}
bellard8df1cd02005-01-28 22:37:22 +00001864
bellard13eb76e2004-01-24 15:23:36 +00001865#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001866
Avi Kivitya8170e52012-10-23 12:30:10 +02001867static void invalidate_and_set_dirty(hwaddr addr,
1868 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001869{
1870 if (!cpu_physical_memory_is_dirty(addr)) {
1871 /* invalidate code */
1872 tb_invalidate_phys_page_range(addr, addr + length, 0);
1873 /* set dirty bit */
1874 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1875 }
Anthony PERARDe2269392012-10-03 13:49:22 +00001876 xen_modified_memory(addr, length);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001877}
1878
Avi Kivitya8170e52012-10-23 12:30:10 +02001879void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02001880 int len, bool is_write)
bellard13eb76e2004-01-24 15:23:36 +00001881{
Avi Kivityac1970f2012-10-03 16:22:53 +02001882 AddressSpaceDispatch *d = as->dispatch;
Avi Kivity37ec01d2012-03-08 18:08:35 +02001883 int l;
bellard13eb76e2004-01-24 15:23:36 +00001884 uint8_t *ptr;
1885 uint32_t val;
Avi Kivitya8170e52012-10-23 12:30:10 +02001886 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02001887 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00001888
bellard13eb76e2004-01-24 15:23:36 +00001889 while (len > 0) {
1890 page = addr & TARGET_PAGE_MASK;
1891 l = (page + TARGET_PAGE_SIZE) - addr;
1892 if (l > len)
1893 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02001894 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00001895
bellard13eb76e2004-01-24 15:23:36 +00001896 if (is_write) {
Avi Kivityf3705d52012-03-08 16:16:34 +02001897 if (!memory_region_is_ram(section->mr)) {
Avi Kivitya8170e52012-10-23 12:30:10 +02001898 hwaddr addr1;
Blue Swirlcc5bea62012-04-14 14:56:48 +00001899 addr1 = memory_region_section_addr(section, addr);
bellard6a00d602005-11-21 23:25:50 +00001900 /* XXX: could force cpu_single_env to NULL to avoid
1901 potential bugs */
aurel326c2934d2009-02-18 21:37:17 +00001902 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00001903 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001904 val = ldl_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02001905 io_mem_write(section->mr, addr1, val, 4);
bellard13eb76e2004-01-24 15:23:36 +00001906 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00001907 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00001908 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001909 val = lduw_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02001910 io_mem_write(section->mr, addr1, val, 2);
bellard13eb76e2004-01-24 15:23:36 +00001911 l = 2;
1912 } else {
bellard1c213d12005-09-03 10:49:04 +00001913 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001914 val = ldub_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02001915 io_mem_write(section->mr, addr1, val, 1);
bellard13eb76e2004-01-24 15:23:36 +00001916 l = 1;
1917 }
Avi Kivityf3705d52012-03-08 16:16:34 +02001918 } else if (!section->readonly) {
Anthony PERARD8ca56922011-07-15 04:32:53 +00001919 ram_addr_t addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02001920 addr1 = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00001921 + memory_region_section_addr(section, addr);
bellard13eb76e2004-01-24 15:23:36 +00001922 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00001923 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00001924 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001925 invalidate_and_set_dirty(addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00001926 }
1927 } else {
Blue Swirlcc5bea62012-04-14 14:56:48 +00001928 if (!(memory_region_is_ram(section->mr) ||
1929 memory_region_is_romd(section->mr))) {
Avi Kivitya8170e52012-10-23 12:30:10 +02001930 hwaddr addr1;
bellard13eb76e2004-01-24 15:23:36 +00001931 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00001932 addr1 = memory_region_section_addr(section, addr);
aurel326c2934d2009-02-18 21:37:17 +00001933 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00001934 /* 32 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02001935 val = io_mem_read(section->mr, addr1, 4);
bellardc27004e2005-01-03 23:35:10 +00001936 stl_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00001937 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00001938 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00001939 /* 16 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02001940 val = io_mem_read(section->mr, addr1, 2);
bellardc27004e2005-01-03 23:35:10 +00001941 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00001942 l = 2;
1943 } else {
bellard1c213d12005-09-03 10:49:04 +00001944 /* 8 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02001945 val = io_mem_read(section->mr, addr1, 1);
bellardc27004e2005-01-03 23:35:10 +00001946 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00001947 l = 1;
1948 }
1949 } else {
1950 /* RAM case */
Anthony PERARD0a1b3572012-03-19 15:54:34 +00001951 ptr = qemu_get_ram_ptr(section->mr->ram_addr
Blue Swirlcc5bea62012-04-14 14:56:48 +00001952 + memory_region_section_addr(section,
1953 addr));
Avi Kivityf3705d52012-03-08 16:16:34 +02001954 memcpy(buf, ptr, l);
bellard13eb76e2004-01-24 15:23:36 +00001955 }
1956 }
1957 len -= l;
1958 buf += l;
1959 addr += l;
1960 }
1961}
bellard8df1cd02005-01-28 22:37:22 +00001962
Avi Kivitya8170e52012-10-23 12:30:10 +02001963void address_space_write(AddressSpace *as, hwaddr addr,
Avi Kivityac1970f2012-10-03 16:22:53 +02001964 const uint8_t *buf, int len)
1965{
1966 address_space_rw(as, addr, (uint8_t *)buf, len, true);
1967}
1968
1969/**
1970 * address_space_read: read from an address space.
1971 *
1972 * @as: #AddressSpace to be accessed
1973 * @addr: address within that address space
 1974 * @buf: buffer into which the data is read
 * @len: number of bytes to read
1975 */
Avi Kivitya8170e52012-10-23 12:30:10 +02001976void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02001977{
1978 address_space_rw(as, addr, buf, len, false);
1979}
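
/* Example (illustrative; gpa is a hypothetical guest physical address):
 * reading a 32-bit value through the memory address space,
 *
 *     uint32_t val;
 *     address_space_read(&address_space_memory, gpa, (uint8_t *)&val,
 *                        sizeof(val));
 *
 * For RAM-backed addresses this is a plain byte copy; it does not apply
 * the per-device endian handling of ldl_*_phys() and friends.
 */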
1980
1981
Avi Kivitya8170e52012-10-23 12:30:10 +02001982void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02001983 int len, int is_write)
1984{
1985 return address_space_rw(&address_space_memory, addr, buf, len, is_write);
1986}
1987
bellardd0ecd2a2006-04-23 17:14:48 +00001988/* used for ROM loading : can write in RAM and ROM */
Avi Kivitya8170e52012-10-23 12:30:10 +02001989void cpu_physical_memory_write_rom(hwaddr addr,
bellardd0ecd2a2006-04-23 17:14:48 +00001990 const uint8_t *buf, int len)
1991{
Avi Kivityac1970f2012-10-03 16:22:53 +02001992 AddressSpaceDispatch *d = address_space_memory.dispatch;
bellardd0ecd2a2006-04-23 17:14:48 +00001993 int l;
1994 uint8_t *ptr;
Avi Kivitya8170e52012-10-23 12:30:10 +02001995 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02001996 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00001997
bellardd0ecd2a2006-04-23 17:14:48 +00001998 while (len > 0) {
1999 page = addr & TARGET_PAGE_MASK;
2000 l = (page + TARGET_PAGE_SIZE) - addr;
2001 if (l > len)
2002 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02002003 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002004
Blue Swirlcc5bea62012-04-14 14:56:48 +00002005 if (!(memory_region_is_ram(section->mr) ||
2006 memory_region_is_romd(section->mr))) {
bellardd0ecd2a2006-04-23 17:14:48 +00002007 /* do nothing */
2008 } else {
2009 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02002010 addr1 = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002011 + memory_region_section_addr(section, addr);
bellardd0ecd2a2006-04-23 17:14:48 +00002012 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002013 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002014 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002015 invalidate_and_set_dirty(addr1, l);
bellardd0ecd2a2006-04-23 17:14:48 +00002016 }
2017 len -= l;
2018 buf += l;
2019 addr += l;
2020 }
2021}
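
/* Usage sketch: firmware loaders populate ROM-backed regions this way
 * (rom_base, rom_data and rom_size are hypothetical),
 *
 *     cpu_physical_memory_write_rom(rom_base, rom_data, rom_size);
 *
 * whereas a plain cpu_physical_memory_write() would silently skip the
 * read-only region, as the write path above shows.
 */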
2022
aliguori6d16c2f2009-01-22 16:59:11 +00002023typedef struct {
2024 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002025 hwaddr addr;
2026 hwaddr len;
aliguori6d16c2f2009-01-22 16:59:11 +00002027} BounceBuffer;
2028
2029static BounceBuffer bounce;
2030
aliguoriba223c22009-01-22 16:59:16 +00002031typedef struct MapClient {
2032 void *opaque;
2033 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00002034 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002035} MapClient;
2036
Blue Swirl72cf2d42009-09-12 07:36:22 +00002037static QLIST_HEAD(map_client_list, MapClient) map_client_list
2038 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002039
2040void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2041{
Anthony Liguori7267c092011-08-20 22:09:37 -05002042 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00002043
2044 client->opaque = opaque;
2045 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002046 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00002047 return client;
2048}
2049
Blue Swirl8b9c99d2012-10-28 11:04:51 +00002050static void cpu_unregister_map_client(void *_client)
aliguoriba223c22009-01-22 16:59:16 +00002051{
2052 MapClient *client = (MapClient *)_client;
2053
Blue Swirl72cf2d42009-09-12 07:36:22 +00002054 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002055 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002056}
2057
2058static void cpu_notify_map_clients(void)
2059{
2060 MapClient *client;
2061
Blue Swirl72cf2d42009-09-12 07:36:22 +00002062 while (!QLIST_EMPTY(&map_client_list)) {
2063 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002064 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09002065 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00002066 }
2067}
2068
aliguori6d16c2f2009-01-22 16:59:11 +00002069/* Map a physical memory region into a host virtual address.
2070 * May map a subset of the requested range, given by and returned in *plen.
2071 * May return NULL if resources needed to perform the mapping are exhausted.
2072 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002073 * Use cpu_register_map_client() to know when retrying the map operation is
2074 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002075 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002076void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002077 hwaddr addr,
2078 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002079 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002080{
Avi Kivityac1970f2012-10-03 16:22:53 +02002081 AddressSpaceDispatch *d = as->dispatch;
Avi Kivitya8170e52012-10-23 12:30:10 +02002082 hwaddr len = *plen;
2083 hwaddr todo = 0;
aliguori6d16c2f2009-01-22 16:59:11 +00002084 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02002085 hwaddr page;
Avi Kivityf3705d52012-03-08 16:16:34 +02002086 MemoryRegionSection *section;
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002087 ram_addr_t raddr = RAM_ADDR_MAX;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002088 ram_addr_t rlen;
2089 void *ret;
aliguori6d16c2f2009-01-22 16:59:11 +00002090
2091 while (len > 0) {
2092 page = addr & TARGET_PAGE_MASK;
2093 l = (page + TARGET_PAGE_SIZE) - addr;
2094 if (l > len)
2095 l = len;
Avi Kivityac1970f2012-10-03 16:22:53 +02002096 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
aliguori6d16c2f2009-01-22 16:59:11 +00002097
Avi Kivityf3705d52012-03-08 16:16:34 +02002098 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002099 if (todo || bounce.buffer) {
aliguori6d16c2f2009-01-22 16:59:11 +00002100 break;
2101 }
2102 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2103 bounce.addr = addr;
2104 bounce.len = l;
2105 if (!is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002106 address_space_read(as, addr, bounce.buffer, l);
aliguori6d16c2f2009-01-22 16:59:11 +00002107 }
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002108
2109 *plen = l;
2110 return bounce.buffer;
aliguori6d16c2f2009-01-22 16:59:11 +00002111 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002112 if (!todo) {
Avi Kivityf3705d52012-03-08 16:16:34 +02002113 raddr = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002114 + memory_region_section_addr(section, addr);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002115 }
aliguori6d16c2f2009-01-22 16:59:11 +00002116
2117 len -= l;
2118 addr += l;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002119 todo += l;
aliguori6d16c2f2009-01-22 16:59:11 +00002120 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002121 rlen = todo;
2122 ret = qemu_ram_ptr_length(raddr, &rlen);
2123 *plen = rlen;
2124 return ret;
aliguori6d16c2f2009-01-22 16:59:11 +00002125}
2126
Avi Kivityac1970f2012-10-03 16:22:53 +02002127/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002128 * Will also mark the memory as dirty if is_write == 1. access_len gives
2129 * the amount of memory that was actually read or written by the caller.
2130 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002131void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2132 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002133{
2134 if (buffer != bounce.buffer) {
2135 if (is_write) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03002136 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002137 while (access_len) {
2138 unsigned l;
2139 l = TARGET_PAGE_SIZE;
2140 if (l > access_len)
2141 l = access_len;
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002142 invalidate_and_set_dirty(addr1, l);
aliguori6d16c2f2009-01-22 16:59:11 +00002143 addr1 += l;
2144 access_len -= l;
2145 }
2146 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002147 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002148 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002149 }
aliguori6d16c2f2009-01-22 16:59:11 +00002150 return;
2151 }
2152 if (is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002153 address_space_write(as, bounce.addr, bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002154 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002155 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002156 bounce.buffer = NULL;
aliguoriba223c22009-01-22 16:59:16 +00002157 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002158}
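
/* Typical zero-copy pattern (a sketch; gpa, size, opaque and retry_cb are
 * hypothetical):
 *
 *     hwaddr plen = size;
 *     void *p = address_space_map(as, gpa, &plen, is_write);
 *     if (p) {
 *         ... access plen bytes at p ...
 *         address_space_unmap(as, p, plen, is_write, plen);
 *     } else {
 *         cpu_register_map_client(opaque, retry_cb);
 *     }
 *
 * Only one bounce buffer exists, so a NULL return means "retry later"
 * rather than a permanent failure.
 */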
bellardd0ecd2a2006-04-23 17:14:48 +00002159
Avi Kivitya8170e52012-10-23 12:30:10 +02002160void *cpu_physical_memory_map(hwaddr addr,
2161 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002162 int is_write)
2163{
2164 return address_space_map(&address_space_memory, addr, plen, is_write);
2165}
2166
Avi Kivitya8170e52012-10-23 12:30:10 +02002167void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2168 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002169{
2170 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2171}
2172
bellard8df1cd02005-01-28 22:37:22 +00002173/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002174static inline uint32_t ldl_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002175 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002176{
bellard8df1cd02005-01-28 22:37:22 +00002177 uint8_t *ptr;
2178 uint32_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02002179 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00002180
Avi Kivityac1970f2012-10-03 16:22:53 +02002181 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002182
Blue Swirlcc5bea62012-04-14 14:56:48 +00002183 if (!(memory_region_is_ram(section->mr) ||
2184 memory_region_is_romd(section->mr))) {
bellard8df1cd02005-01-28 22:37:22 +00002185 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00002186 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02002187 val = io_mem_read(section->mr, addr, 4);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002188#if defined(TARGET_WORDS_BIGENDIAN)
2189 if (endian == DEVICE_LITTLE_ENDIAN) {
2190 val = bswap32(val);
2191 }
2192#else
2193 if (endian == DEVICE_BIG_ENDIAN) {
2194 val = bswap32(val);
2195 }
2196#endif
bellard8df1cd02005-01-28 22:37:22 +00002197 } else {
2198 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02002199 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002200 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002201 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002202 switch (endian) {
2203 case DEVICE_LITTLE_ENDIAN:
2204 val = ldl_le_p(ptr);
2205 break;
2206 case DEVICE_BIG_ENDIAN:
2207 val = ldl_be_p(ptr);
2208 break;
2209 default:
2210 val = ldl_p(ptr);
2211 break;
2212 }
bellard8df1cd02005-01-28 22:37:22 +00002213 }
2214 return val;
2215}
2216
Avi Kivitya8170e52012-10-23 12:30:10 +02002217uint32_t ldl_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002218{
2219 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2220}
2221
Avi Kivitya8170e52012-10-23 12:30:10 +02002222uint32_t ldl_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002223{
2224 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2225}
2226
Avi Kivitya8170e52012-10-23 12:30:10 +02002227uint32_t ldl_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002228{
2229 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2230}
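
/* Example (illustrative; desc_addr is hypothetical): device emulation can
 * read fixed-endian fields without caring about host or target byte order,
 *
 *     uint32_t flags = ldl_le_phys(desc_addr);
 *     uint32_t magic = ldl_be_phys(desc_addr + 4);
 */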
2231
bellard84b7b8e2005-11-28 21:19:04 +00002232/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002233static inline uint64_t ldq_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002234 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00002235{
bellard84b7b8e2005-11-28 21:19:04 +00002236 uint8_t *ptr;
2237 uint64_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02002238 MemoryRegionSection *section;
bellard84b7b8e2005-11-28 21:19:04 +00002239
Avi Kivityac1970f2012-10-03 16:22:53 +02002240 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00002241
Blue Swirlcc5bea62012-04-14 14:56:48 +00002242 if (!(memory_region_is_ram(section->mr) ||
2243 memory_region_is_romd(section->mr))) {
bellard84b7b8e2005-11-28 21:19:04 +00002244 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00002245 addr = memory_region_section_addr(section, addr);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002246
2247 /* XXX This is broken when device endian != cpu endian.
2248 Fix and add "endian" variable check */
bellard84b7b8e2005-11-28 21:19:04 +00002249#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivity37ec01d2012-03-08 18:08:35 +02002250 val = io_mem_read(section->mr, addr, 4) << 32;
2251 val |= io_mem_read(section->mr, addr + 4, 4);
bellard84b7b8e2005-11-28 21:19:04 +00002252#else
Avi Kivity37ec01d2012-03-08 18:08:35 +02002253 val = io_mem_read(section->mr, addr, 4);
2254 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
bellard84b7b8e2005-11-28 21:19:04 +00002255#endif
2256 } else {
2257 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02002258 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002259 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00002260 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002261 switch (endian) {
2262 case DEVICE_LITTLE_ENDIAN:
2263 val = ldq_le_p(ptr);
2264 break;
2265 case DEVICE_BIG_ENDIAN:
2266 val = ldq_be_p(ptr);
2267 break;
2268 default:
2269 val = ldq_p(ptr);
2270 break;
2271 }
bellard84b7b8e2005-11-28 21:19:04 +00002272 }
2273 return val;
2274}
2275
Avi Kivitya8170e52012-10-23 12:30:10 +02002276uint64_t ldq_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002277{
2278 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2279}
2280
Avi Kivitya8170e52012-10-23 12:30:10 +02002281uint64_t ldq_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002282{
2283 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2284}
2285
Avi Kivitya8170e52012-10-23 12:30:10 +02002286uint64_t ldq_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002287{
2288 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2289}
2290
/* XXX: optimize */
uint32_t ldub_phys(hwaddr addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}
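
/*
 * Note on the bswap logic above (editorial addition, not original
 * code): the value returned by io_mem_read() is in the target's native
 * byte order, so a swap is needed only when the caller asked for the
 * opposite order:
 *
 *     BE target + DEVICE_LITTLE_ENDIAN  -> bswap
 *     LE target + DEVICE_BIG_ENDIAN     -> bswap
 *     any target + DEVICE_NATIVE_ENDIAN -> no swap
 */
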
uint32_t lduw_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned. The RAM page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
                               & TARGET_PAGE_MASK)
                              + memory_region_section_addr(section, addr);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
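
/*
 * Editorial note (not original code): stl_phys_notdirty() exists for
 * target MMU helpers that emulate hardware-set accessed/dirty bits in
 * guest PTEs.  Skipping the dirty-bitmap update and code invalidation
 * avoids treating every PTE update as a self-modifying write; an
 * ordinary guest store should use stl_phys() below, which ends with
 * invalidate_and_set_dirty().  A hypothetical x86-style caller:
 *
 *     pte |= PG_DIRTY_MASK;              // mark the PTE dirty
 *     stl_phys_notdirty(pte_addr, pte);  // pte_addr is illustrative
 */
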
/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
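
/*
 * Usage sketch (editorial addition, not original code): a device model
 * that stores a 32-bit field in guest memory in a wire-defined byte
 * order picks the explicit variant; both names below are made up.
 *
 *     stl_le_phys(ring_addr + 4, used_idx);  // hypothetical LE field
 */
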
/* XXX: optimize */
void stb_phys(hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
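
/*
 * Editorial note (not original code): the stq_* stores convert the
 * value in a local buffer and then write raw bytes, instead of taking
 * an endian argument like the 16/32-bit paths above.  tswap64() swaps
 * only when host and target byte order differ, so stq_phys() stores
 * target-native order, while cpu_to_le64()/cpu_to_be64() pin the byte
 * order regardless of host and target.
 */
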
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        } else {
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
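
/*
 * Usage sketch (editorial addition, not original code): this is the
 * accessor the gdbstub relies on for guest *virtual* addresses; it
 * translates page by page and routes writes through
 * cpu_physical_memory_write_rom() so breakpoints can be patched even
 * into ROM-backed code.
 *
 *     uint8_t insn[4];
 *     if (cpu_memory_rw_debug(env, pc, insn, sizeof(insn), 0) < 0) {
 *         // no mapping at pc
 *     }
 */
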
#endif

#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             phys_addr >> TARGET_PAGE_BITS);

    return !(memory_region_is_ram(section->mr) ||
             memory_region_is_romd(section->mr));
}
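
/*
 * Editorial note (not original code): "is I/O" here means the page is
 * neither RAM nor ROMD, i.e. accesses at phys_addr are dispatched to a
 * device's MMIO callbacks rather than backed by host memory, so its
 * contents cannot simply be copied out.  For example, a memory-dump
 * path might skip such pages:
 *
 *     if (!cpu_physical_memory_is_io(paddr)) {
 *         cpu_physical_memory_read(paddr, buf, TARGET_PAGE_SIZE);
 *     }
 */
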
#endif