/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_UNASSIGNED
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;
DMAContext dma_context_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned, io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

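/*
 * The physical page map is a multi-level table: interior nodes are
 * arrays of L2_SIZE PhysPageEntry entries, and leaves hold indices
 * into the phys_sections array.  Nodes live in a single growable
 * array, phys_map_nodes, and are referenced by 16-bit index rather
 * than by pointer; PHYS_MAP_NODE_NIL (defined below) marks an entry
 * with no node attached yet.
 */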
/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

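/*
 * Fill [*index, *index + *nb) pages with the given leaf (a
 * phys_sections index).  The walk is recursive: at each level an
 * entry covers "step" pages; a range that is step-aligned and at
 * least step pages long is stored as a leaf at this level, anything
 * smaller recurses one level down.  Freshly allocated bottom-level
 * nodes start out pointing at phys_section_unassigned.
 */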
static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

static MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &phys_sections[phys_section_unassigned];
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }
    return &phys_sections[lp.ptr];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

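/*
 * Translate an address-space address into a section, an offset within
 * that section's MemoryRegion (*xlat), and clamp *plen so the access
 * does not run past the end of the region.
 */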
MemoryRegionSection *address_space_translate(AddressSpace *as, hwaddr addr,
                                             hwaddr *xlat, hwaddr *plen,
                                             bool is_write)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = phys_page_find(as->dispatch, addr >> TARGET_PAGE_BITS);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = MIN(int128_get64(diff), *plen);
    return section;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#else
#define vmstate_cpu_common vmstate_dummy
#endif

CPUState *qemu_get_cpu(int index)
{
    CPUArchState *env = first_cpu;
    CPUState *cpu = NULL;

    while (env) {
        cpu = ENV_GET_CPU(env);
        if (cpu->cpu_index == index) {
            break;
        }
        env = env->next_cpu;
    }

    return env ? cpu : NULL;
}

void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data)
{
    CPUArchState *env = first_cpu;

    while (env) {
        func(ENV_GET_CPU(env), data);
        env = env->next_cpu;
    }
}

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
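/*
 * Watchpoints cover a power-of-2 sized, naturally aligned range of
 * guest addresses.  Rather than the length itself, each watchpoint
 * stores len_mask = ~(len - 1); the insert path below rejects any
 * combination where len is not a power of two, or addr has bits set
 * below the mask (i.e. is not aligned to len).
 */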
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

void cpu_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->exit_request = 1;
    cpu->tcg_exit_req = 1;
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining. */
    new_env->next_cpu = next_cpu;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

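/*
 * Build the value stored in the iotlb for this section.  For RAM the
 * entry is the page-aligned ram_addr plus the offset, with the small
 * notdirty or rom section index OR-ed into the low bits; the
 * assertion in phys_section_add keeps section numbers below
 * TARGET_PAGE_SIZE, so the index cannot spill into the address bits.
 * For MMIO the entry is the section number itself plus the offset.
 * Pages with an armed watchpoint are redirected to the watch section
 * so that matching accesses go through the trap routines.
 */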
hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        iotlb = section - phys_sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

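/*
 * Memory map entries are per target page.  When a page is shared by
 * more than one region (e.g. a small MMIO bank), it is represented by
 * a subpage_t: a container MemoryRegion whose sub_section table picks
 * the right section for each byte offset within the page.
 */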
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(hwaddr base);

static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(AddressSpaceDispatch *d)
{
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(phys_sections_nb < TARGET_PAGE_SIZE);

    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}

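/*
 * Install a section that does not cover whole pages: route the
 * affected page through a subpage_t, creating one if the page is not
 * already a subpage, then record the section for the sub-page byte
 * range.
 */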
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}

static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    hwaddr addr;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    addr = start_addr;
    phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}

QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > MAX_PHYS_ADDR_SPACE_BITS)

static MemoryRegionSection limit(MemoryRegionSection section)
{
    section.size = MIN(section.offset_within_address_space + section.size,
                       MAX_PHYS_ADDR + 1)
                   - section.offset_within_address_space;

    return section;
}

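/*
 * mem_add splits an incoming section into at most three pieces: an
 * unaligned head and tail, which go through register_subpage, and a
 * page-aligned middle, which register_multipage can map whole pages
 * at a time.
 */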
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
    MemoryRegionSection now = limit(*section), remain = limit(*section);

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(d, &now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    while (remain.size >= TARGET_PAGE_SIZE) {
        now = remain;
        if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = TARGET_PAGE_SIZE;
            register_subpage(d, &now);
        } else {
            now.size &= TARGET_PAGE_MASK;
            register_multipage(d, &now);
        }
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(d, &now);
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

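/*
 * Back a RAM block with a file on a hugetlbfs mount: create an
 * unlinked temporary file under -mem-path, size it to a whole number
 * of huge pages, and mmap it.  Returns NULL on any failure so the
 * caller can fall back to anonymous memory.
 */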
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif

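/*
 * Pick a ram_addr_t offset for a new block using a best-fit search:
 * for each existing block, measure the gap between its end and the
 * start of the next block above it, and take the smallest gap that
 * still fits the requested size.
 */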
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too. */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

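/*
 * Allocate a new RAM block.  The backing memory comes from, in order
 * of preference: a caller-provided pointer, a file on -mem-path, the
 * Xen mapcache, a KVM-specific allocator, or plain anonymous memory.
 * The block is then inserted into ram_list, which is kept sorted from
 * biggest to smallest, and its dirty bitmap is initialized to
 * all-dirty.
 */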
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    /* This assumes the iothread lock is taken here too. */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_anon_ram_alloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_ram_alloc(size);
            } else {
                new_block->host = qemu_anon_ram_alloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block. */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too. */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too. */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_anon_ram_free(block->host, block->length);
                }
#else
                abort();
#endif
            } else {
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_anon_ram_free(block->host, block->length);
                }
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

Huang Yingcd19cfa2011-03-02 08:56:19 +01001204#ifndef _WIN32
1205void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1206{
1207 RAMBlock *block;
1208 ram_addr_t offset;
1209 int flags;
1210 void *area, *vaddr;
1211
Paolo Bonzinia3161032012-11-14 15:54:48 +01001212 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001213 offset = addr - block->offset;
1214 if (offset < block->length) {
1215 vaddr = block->host + offset;
1216 if (block->flags & RAM_PREALLOC_MASK) {
1217 ;
1218 } else {
1219 flags = MAP_FIXED;
1220 munmap(vaddr, length);
1221 if (mem_path) {
1222#if defined(__linux__) && !defined(TARGET_S390X)
1223 if (block->fd) {
1224#ifdef MAP_POPULATE
1225 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1226 MAP_PRIVATE;
1227#else
1228 flags |= MAP_PRIVATE;
1229#endif
1230 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1231 flags, block->fd, offset);
1232 } else {
1233 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1234 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1235 flags, -1, 0);
1236 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01001237#else
1238 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001239#endif
1240 } else {
1241#if defined(TARGET_S390X) && defined(CONFIG_KVM)
1242 flags |= MAP_SHARED | MAP_ANONYMOUS;
1243 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
1244 flags, -1, 0);
1245#else
1246 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1247 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1248 flags, -1, 0);
1249#endif
1250 }
1251 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001252 fprintf(stderr, "Could not remap addr: "
1253 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001254 length, addr);
1255 exit(1);
1256 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001257 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001258 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001259 }
1260 return;
1261 }
1262 }
1263}
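
/* A sketch of the intended use of qemu_ram_remap() (assumption based on
 * its original use case, hardware memory error recovery): the caller
 * discards a poisoned host page and this recreates a fresh mapping at
 * the same virtual address, preserving the guest's view of RAM. */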
1264#endif /* !_WIN32 */
1265
pbrookdc828ca2009-04-09 22:21:07 +00001266/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00001267 With the exception of the softmmu code in this file, this should
1268 only be used for local memory (e.g. video ram) that the device owns,
1269 and knows it isn't going to access beyond the end of the block.
1270
1271 It should not be used for general purpose DMA.
1272 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1273 */
Anthony Liguoric227f092009-10-01 16:12:16 -05001274void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00001275{
pbrook94a6b542009-04-11 17:15:54 +00001276 RAMBlock *block;
1277
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001278 /* The list is protected by the iothread lock here. */
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001279 block = ram_list.mru_block;
1280 if (block && addr - block->offset < block->length) {
1281 goto found;
1282 }
Paolo Bonzinia3161032012-11-14 15:54:48 +01001283 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Alex Williamsonf471a172010-06-11 11:11:42 -06001284 if (addr - block->offset < block->length) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001285 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001286 }
pbrook94a6b542009-04-11 17:15:54 +00001287 }
Alex Williamsonf471a172010-06-11 11:11:42 -06001288
1289 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1290 abort();
1291
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001292found:
1293 ram_list.mru_block = block;
1294 if (xen_enabled()) {
1295 /* We need to check whether the requested address is in RAM,
1296 * because we don't want to map all of guest memory in QEMU;
1297 * if it is, map just up to the end of the page.
1298 */
1299 if (block->offset == 0) {
1300 return xen_map_cache(addr, 0, 0);
1301 } else if (block->host == NULL) {
1302 block->host =
1303 xen_map_cache(block->offset, block->length, 1);
1304 }
1305 }
1306 return block->host + (addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00001307}
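
/* Sketch of intended use (device-owned memory only, per the comment
 * above; 'vram_offset' is assumed to have come from qemu_ram_alloc()):
 *
 *     uint8_t *vram = qemu_get_ram_ptr(vram_offset);
 *     memcpy(vram + y * stride, scanline, stride);
 */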
1308
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001309/* Return a host pointer to ram allocated with qemu_ram_alloc. Same as
1310 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
1311 *
1312 * ??? Is this still necessary?
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001313 */
Blue Swirl8b9c99d2012-10-28 11:04:51 +00001314static void *qemu_safe_ram_ptr(ram_addr_t addr)
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001315{
1316 RAMBlock *block;
1317
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001318 /* The list is protected by the iothread lock here. */
Paolo Bonzinia3161032012-11-14 15:54:48 +01001319 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001320 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02001321 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001322 /* We need to check whether the requested address is in RAM,
1323 * because we don't want to map all of guest memory in QEMU;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001324 * if it is, map just up to the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01001325 */
1326 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001327 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01001328 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001329 block->host =
1330 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01001331 }
1332 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001333 return block->host + (addr - block->offset);
1334 }
1335 }
1336
1337 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1338 abort();
1339
1340 return NULL;
1341}
1342
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001343/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1344 * but takes a size argument */
Blue Swirl8b9c99d2012-10-28 11:04:51 +00001345static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001346{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001347 if (*size == 0) {
1348 return NULL;
1349 }
Jan Kiszka868bb332011-06-21 22:59:09 +02001350 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001351 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02001352 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001353 RAMBlock *block;
1354
Paolo Bonzinia3161032012-11-14 15:54:48 +01001355 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001356 if (addr - block->offset < block->length) {
1357 if (addr - block->offset + *size > block->length)
1358 *size = block->length - addr + block->offset;
1359 return block->host + (addr - block->offset);
1360 }
1361 }
1362
1363 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1364 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001365 }
1366}
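
/* Note the clamping above: if, say, 'addr' lies 4 bytes before the end of
 * its RAMBlock and *size is 16 on entry, *size is reduced to 4 so that the
 * returned pointer never spans two blocks. */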
1367
Marcelo Tosattie8902612010-10-11 15:31:19 -03001368int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00001369{
pbrook94a6b542009-04-11 17:15:54 +00001370 RAMBlock *block;
1371 uint8_t *host = ptr;
1372
Jan Kiszka868bb332011-06-21 22:59:09 +02001373 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001374 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001375 return 0;
1376 }
1377
Paolo Bonzinia3161032012-11-14 15:54:48 +01001378 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001379 /* This case happens when the block is not mapped. */
1380 if (block->host == NULL) {
1381 continue;
1382 }
Alex Williamsonf471a172010-06-11 11:11:42 -06001383 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03001384 *ram_addr = block->offset + (host - block->host);
1385 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06001386 }
pbrook94a6b542009-04-11 17:15:54 +00001387 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001388
Marcelo Tosattie8902612010-10-11 15:31:19 -03001389 return -1;
1390}
Alex Williamsonf471a172010-06-11 11:11:42 -06001391
Marcelo Tosattie8902612010-10-11 15:31:19 -03001392/* Some of the softmmu routines need to translate from a host pointer
1393 (typically a TLB entry) back to a ram offset. */
1394ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
1395{
1396 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06001397
Marcelo Tosattie8902612010-10-11 15:31:19 -03001398 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
1399 fprintf(stderr, "Bad ram pointer %p\n", ptr);
1400 abort();
1401 }
1402 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00001403}
1404
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001405static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
1406 unsigned size, bool is_write)
bellard33417e72003-08-10 21:47:01 +00001407{
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001408 return false;
blueswir1e18231a2008-10-06 18:46:28 +00001409}
1410
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001411const MemoryRegionOps unassigned_mem_ops = {
1412 .valid.accepts = unassigned_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001413 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00001414};
1415
Avi Kivitya8170e52012-10-23 12:30:10 +02001416static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001417 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001418{
bellard3a7d9292005-08-21 09:26:42 +00001419 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001420 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00001421 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001422 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001423 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00001424 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001425 switch (size) {
1426 case 1:
1427 stb_p(qemu_get_ram_ptr(ram_addr), val);
1428 break;
1429 case 2:
1430 stw_p(qemu_get_ram_ptr(ram_addr), val);
1431 break;
1432 case 4:
1433 stl_p(qemu_get_ram_ptr(ram_addr), val);
1434 break;
1435 default:
1436 abort();
1437 }
bellardf23db162005-08-21 19:12:28 +00001438 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001439 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00001440 /* we remove the notdirty callback only if the code has been
1441 flushed */
1442 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00001443 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00001444}
1445
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001446static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1447 unsigned size, bool is_write)
1448{
1449 return is_write;
1450}
1451
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001452static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001453 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001454 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001455 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00001456};
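
/* How the notdirty path works: a page whose CODE_DIRTY_FLAG is clear is
 * entered into the TLB pointing at io_mem_notdirty rather than at RAM, so
 * the first write lands in notdirty_mem_write() above, which invalidates
 * any TBs translated from that page before performing the store.  Once
 * dirty_flags reads 0xff, tlb_set_dirty() flips the entry back to a plain
 * RAM mapping and subsequent writes go straight to memory. */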
1457
pbrook0f459d12008-06-09 00:20:13 +00001458/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00001459static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00001460{
Andreas Färber9349b4f2012-03-14 01:38:32 +01001461 CPUArchState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00001462 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00001463 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00001464 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00001465 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00001466
aliguori06d55cc2008-11-18 20:24:06 +00001467 if (env->watchpoint_hit) {
1468 /* We re-entered the check after replacing the TB. Now raise
1469 * the debug interrupt so that it will trigger after the
1470 * current instruction. */
Andreas Färberc3affe52013-01-18 15:03:43 +01001471 cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00001472 return;
1473 }
pbrook2e70f6e2008-06-29 01:03:05 +00001474 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00001475 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001476 if ((vaddr == (wp->vaddr & len_mask) ||
1477 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00001478 wp->flags |= BP_WATCHPOINT_HIT;
1479 if (!env->watchpoint_hit) {
1480 env->watchpoint_hit = wp;
Blue Swirl5a316522012-12-02 21:28:09 +00001481 tb_check_watchpoint(env);
aliguori6e140f22008-11-18 20:37:55 +00001482 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1483 env->exception_index = EXCP_DEBUG;
Max Filippov488d6572012-01-29 02:24:39 +04001484 cpu_loop_exit(env);
aliguori6e140f22008-11-18 20:37:55 +00001485 } else {
1486 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1487 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
Max Filippov488d6572012-01-29 02:24:39 +04001488 cpu_resume_from_signal(env, NULL);
aliguori6e140f22008-11-18 20:37:55 +00001489 }
aliguori06d55cc2008-11-18 20:24:06 +00001490 }
aliguori6e140f22008-11-18 20:37:55 +00001491 } else {
1492 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00001493 }
1494 }
1495}
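
/* Worked example of the range check above (len_mask here is ~(len - 1),
 * as passed in by watch_mem_read/write below): a 4-byte watchpoint at
 * 0x1000 has wp->len_mask == ~3, so a 1-byte access at 0x1002 satisfies
 * (vaddr & wp->len_mask) == wp->vaddr and hits the watchpoint. */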
1496
pbrook6658ffb2007-03-16 23:58:11 +00001497/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1498 so these check for a hit then pass through to the normal out-of-line
1499 phys routines. */
Avi Kivitya8170e52012-10-23 12:30:10 +02001500static uint64_t watch_mem_read(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001501 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001502{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001503 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1504 switch (size) {
1505 case 1: return ldub_phys(addr);
1506 case 2: return lduw_phys(addr);
1507 case 4: return ldl_phys(addr);
1508 default: abort();
1509 }
pbrook6658ffb2007-03-16 23:58:11 +00001510}
1511
Avi Kivitya8170e52012-10-23 12:30:10 +02001512static void watch_mem_write(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001513 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001514{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001515 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1516 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04001517 case 1:
1518 stb_phys(addr, val);
1519 break;
1520 case 2:
1521 stw_phys(addr, val);
1522 break;
1523 case 4:
1524 stl_phys(addr, val);
1525 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02001526 default: abort();
1527 }
pbrook6658ffb2007-03-16 23:58:11 +00001528}
1529
Avi Kivity1ec9b902012-01-02 12:47:48 +02001530static const MemoryRegionOps watch_mem_ops = {
1531 .read = watch_mem_read,
1532 .write = watch_mem_write,
1533 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00001534};
pbrook6658ffb2007-03-16 23:58:11 +00001535
Avi Kivitya8170e52012-10-23 12:30:10 +02001536static uint64_t subpage_read(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001537 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001538{
Avi Kivity70c68e42012-01-02 12:32:48 +02001539 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07001540 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02001541 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00001542#if defined(DEBUG_SUBPAGE)
1543 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
1544 mmio, len, addr, idx);
1545#endif
blueswir1db7b5422007-05-26 17:36:03 +00001546
Avi Kivity5312bd82012-02-12 18:32:55 +02001547 section = &phys_sections[mmio->sub_section[idx]];
1548 addr += mmio->base;
1549 addr -= section->offset_within_address_space;
1550 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02001551 return io_mem_read(section->mr, addr, len);
blueswir1db7b5422007-05-26 17:36:03 +00001552}
1553
Avi Kivitya8170e52012-10-23 12:30:10 +02001554static void subpage_write(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001555 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001556{
Avi Kivity70c68e42012-01-02 12:32:48 +02001557 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07001558 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02001559 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00001560#if defined(DEBUG_SUBPAGE)
Avi Kivity70c68e42012-01-02 12:32:48 +02001561 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
1562 " idx %d value %"PRIx64"\n",
Richard Hendersonf6405242010-04-22 16:47:31 -07001563 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00001564#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07001565
Avi Kivity5312bd82012-02-12 18:32:55 +02001566 section = &phys_sections[mmio->sub_section[idx]];
1567 addr += mmio->base;
1568 addr -= section->offset_within_address_space;
1569 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02001570 io_mem_write(section->mr, addr, value, len);
blueswir1db7b5422007-05-26 17:36:03 +00001571}
1572
Avi Kivity70c68e42012-01-02 12:32:48 +02001573static const MemoryRegionOps subpage_ops = {
1574 .read = subpage_read,
1575 .write = subpage_write,
1576 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00001577};
1578
Avi Kivitya8170e52012-10-23 12:30:10 +02001579static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
Avi Kivityde712f92012-01-02 12:41:07 +02001580 unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01001581{
1582 ram_addr_t raddr = addr;
1583 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02001584 switch (size) {
1585 case 1: return ldub_p(ptr);
1586 case 2: return lduw_p(ptr);
1587 case 4: return ldl_p(ptr);
1588 default: abort();
1589 }
Andreas Färber56384e82011-11-30 16:26:21 +01001590}
1591
Avi Kivitya8170e52012-10-23 12:30:10 +02001592static void subpage_ram_write(void *opaque, hwaddr addr,
Avi Kivityde712f92012-01-02 12:41:07 +02001593 uint64_t value, unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01001594{
1595 ram_addr_t raddr = addr;
1596 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02001597 switch (size) {
1598 case 1: return stb_p(ptr, value);
1599 case 2: return stw_p(ptr, value);
1600 case 4: return stl_p(ptr, value);
1601 default: abort();
1602 }
Andreas Färber56384e82011-11-30 16:26:21 +01001603}
1604
Avi Kivityde712f92012-01-02 12:41:07 +02001605static const MemoryRegionOps subpage_ram_ops = {
1606 .read = subpage_ram_read,
1607 .write = subpage_ram_write,
1608 .endianness = DEVICE_NATIVE_ENDIAN,
Andreas Färber56384e82011-11-30 16:26:21 +01001609};
1610
Anthony Liguoric227f092009-10-01 16:12:16 -05001611static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02001612 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00001613{
1614 int idx, eidx;
1615
1616 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1617 return -1;
1618 idx = SUBPAGE_IDX(start);
1619 eidx = SUBPAGE_IDX(end);
1620#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00001621 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", __func__,
blueswir1db7b5422007-05-26 17:36:03 +00001622 mmio, start, end, idx, eidx, section);
1623#endif
Avi Kivity5312bd82012-02-12 18:32:55 +02001624 if (memory_region_is_ram(phys_sections[section].mr)) {
1625 MemoryRegionSection new_section = phys_sections[section];
1626 new_section.mr = &io_mem_subpage_ram;
1627 section = phys_section_add(&new_section);
Andreas Färber56384e82011-11-30 16:26:21 +01001628 }
blueswir1db7b5422007-05-26 17:36:03 +00001629 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02001630 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00001631 }
1632
1633 return 0;
1634}
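
/* Example: SUBPAGE_IDX() reduces an address to its byte offset within the
 * page, so registering [0x100, 0x1ff] for a section simply fills
 * mmio->sub_section[0x100..0x1ff] with that section's index;
 * subpage_read/write above then re-dispatch each access through it. */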
1635
Avi Kivitya8170e52012-10-23 12:30:10 +02001636static subpage_t *subpage_init(hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00001637{
Anthony Liguoric227f092009-10-01 16:12:16 -05001638 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00001639
Anthony Liguori7267c092011-08-20 22:09:37 -05001640 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00001641
1642 mmio->base = base;
Avi Kivity70c68e42012-01-02 12:32:48 +02001643 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
1644 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02001645 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00001646#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00001647 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1648 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00001649#endif
Avi Kivity0f0cb162012-02-13 17:14:32 +02001650 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
blueswir1db7b5422007-05-26 17:36:03 +00001651
1652 return mmio;
1653}
1654
Avi Kivity5312bd82012-02-12 18:32:55 +02001655static uint16_t dummy_section(MemoryRegion *mr)
1656{
1657 MemoryRegionSection section = {
1658 .mr = mr,
1659 .offset_within_address_space = 0,
1660 .offset_within_region = 0,
1661 .size = UINT64_MAX,
1662 };
1663
1664 return phys_section_add(&section);
1665}
1666
Avi Kivitya8170e52012-10-23 12:30:10 +02001667MemoryRegion *iotlb_to_region(hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02001668{
Avi Kivity37ec01d2012-03-08 18:08:35 +02001669 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02001670}
1671
Avi Kivitye9179ce2009-06-14 11:38:52 +03001672static void io_mem_init(void)
1673{
Paolo Bonzinibf8d5162013-05-24 14:39:13 +02001674 memory_region_init_io(&io_mem_rom, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001675 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
1676 "unassigned", UINT64_MAX);
1677 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
1678 "notdirty", UINT64_MAX);
Avi Kivityde712f92012-01-02 12:41:07 +02001679 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
1680 "subpage-ram", UINT64_MAX);
Avi Kivity1ec9b902012-01-02 12:47:48 +02001681 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
1682 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03001683}
1684
Avi Kivityac1970f2012-10-03 16:22:53 +02001685static void mem_begin(MemoryListener *listener)
1686{
1687 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
1688
1689 destroy_all_mappings(d);
1690 d->phys_map.ptr = PHYS_MAP_NODE_NIL;
1691}
1692
Avi Kivity50c1e142012-02-08 21:36:02 +02001693static void core_begin(MemoryListener *listener)
1694{
Avi Kivity5312bd82012-02-12 18:32:55 +02001695 phys_sections_clear();
1696 phys_section_unassigned = dummy_section(&io_mem_unassigned);
Avi Kivityaa102232012-03-08 17:06:55 +02001697 phys_section_notdirty = dummy_section(&io_mem_notdirty);
1698 phys_section_rom = dummy_section(&io_mem_rom);
1699 phys_section_watch = dummy_section(&io_mem_watch);
Avi Kivity50c1e142012-02-08 21:36:02 +02001700}
1701
Avi Kivity1d711482012-10-02 18:54:45 +02001702static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02001703{
Andreas Färber9349b4f2012-03-14 01:38:32 +01001704 CPUArchState *env;
Avi Kivity117712c2012-02-12 21:23:17 +02001705
1706 /* since each CPU stores ram addresses in its TLB cache, we must
1707 reset the modified entries */
1708 /* XXX: slow ! */
1709 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1710 tlb_flush(env, 1);
1711 }
Avi Kivity50c1e142012-02-08 21:36:02 +02001712}
1713
Avi Kivity93632742012-02-08 16:54:16 +02001714static void core_log_global_start(MemoryListener *listener)
1715{
1716 cpu_physical_memory_set_dirty_tracking(1);
1717}
1718
1719static void core_log_global_stop(MemoryListener *listener)
1720{
1721 cpu_physical_memory_set_dirty_tracking(0);
1722}
1723
Avi Kivity4855d412012-02-08 21:16:05 +02001724static void io_region_add(MemoryListener *listener,
1725 MemoryRegionSection *section)
1726{
Avi Kivitya2d33522012-03-05 17:40:12 +02001727 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
1728
1729 mrio->mr = section->mr;
1730 mrio->offset = section->offset_within_region;
1731 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
Avi Kivity4855d412012-02-08 21:16:05 +02001732 section->offset_within_address_space, section->size);
Avi Kivitya2d33522012-03-05 17:40:12 +02001733 ioport_register(&mrio->iorange);
Avi Kivity4855d412012-02-08 21:16:05 +02001734}
1735
1736static void io_region_del(MemoryListener *listener,
1737 MemoryRegionSection *section)
1738{
1739 isa_unassign_ioport(section->offset_within_address_space, section->size);
1740}
1741
Avi Kivity93632742012-02-08 16:54:16 +02001742static MemoryListener core_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02001743 .begin = core_begin,
Avi Kivity93632742012-02-08 16:54:16 +02001744 .log_global_start = core_log_global_start,
1745 .log_global_stop = core_log_global_stop,
Avi Kivityac1970f2012-10-03 16:22:53 +02001746 .priority = 1,
Avi Kivity93632742012-02-08 16:54:16 +02001747};
1748
Avi Kivity4855d412012-02-08 21:16:05 +02001749static MemoryListener io_memory_listener = {
1750 .region_add = io_region_add,
1751 .region_del = io_region_del,
Avi Kivity4855d412012-02-08 21:16:05 +02001752 .priority = 0,
1753};
1754
Avi Kivity1d711482012-10-02 18:54:45 +02001755static MemoryListener tcg_memory_listener = {
1756 .commit = tcg_commit,
1757};
1758
Avi Kivityac1970f2012-10-03 16:22:53 +02001759void address_space_init_dispatch(AddressSpace *as)
1760{
1761 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1762
1763 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1764 d->listener = (MemoryListener) {
1765 .begin = mem_begin,
1766 .region_add = mem_add,
1767 .region_nop = mem_add,
1768 .priority = 0,
1769 };
1770 as->dispatch = d;
1771 memory_listener_register(&d->listener, as);
1772}
1773
Avi Kivity83f3c252012-10-07 12:59:55 +02001774void address_space_destroy_dispatch(AddressSpace *as)
1775{
1776 AddressSpaceDispatch *d = as->dispatch;
1777
1778 memory_listener_unregister(&d->listener);
1779 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
1780 g_free(d);
1781 as->dispatch = NULL;
1782}
1783
Avi Kivity62152b82011-07-26 14:26:14 +03001784static void memory_map_init(void)
1785{
Anthony Liguori7267c092011-08-20 22:09:37 -05001786 system_memory = g_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03001787 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity2673a5d2012-10-02 18:49:28 +02001788 address_space_init(&address_space_memory, system_memory);
1789 address_space_memory.name = "memory";
Avi Kivity309cb472011-08-08 16:09:03 +03001790
Anthony Liguori7267c092011-08-20 22:09:37 -05001791 system_io = g_malloc(sizeof(*system_io));
Avi Kivity309cb472011-08-08 16:09:03 +03001792 memory_region_init(system_io, "io", 65536);
Avi Kivity2673a5d2012-10-02 18:49:28 +02001793 address_space_init(&address_space_io, system_io);
1794 address_space_io.name = "I/O";
Avi Kivity93632742012-02-08 16:54:16 +02001795
Avi Kivityf6790af2012-10-02 20:13:51 +02001796 memory_listener_register(&core_memory_listener, &address_space_memory);
1797 memory_listener_register(&io_memory_listener, &address_space_io);
1798 memory_listener_register(&tcg_memory_listener, &address_space_memory);
Peter Maydell9e119082012-10-29 11:34:32 +10001799
1800 dma_context_init(&dma_context_memory, &address_space_memory,
1801 NULL, NULL, NULL);
Avi Kivity62152b82011-07-26 14:26:14 +03001802}
1803
1804MemoryRegion *get_system_memory(void)
1805{
1806 return system_memory;
1807}
1808
Avi Kivity309cb472011-08-08 16:09:03 +03001809MemoryRegion *get_system_io(void)
1810{
1811 return system_io;
1812}
1813
pbrooke2eef172008-06-08 01:09:01 +00001814#endif /* !defined(CONFIG_USER_ONLY) */
1815
bellard13eb76e2004-01-24 15:23:36 +00001816/* physical memory access (slow version, mainly for debug) */
1817#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01001818int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00001819 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00001820{
1821 int l, flags;
1822 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00001823 void * p;
bellard13eb76e2004-01-24 15:23:36 +00001824
1825 while (len > 0) {
1826 page = addr & TARGET_PAGE_MASK;
1827 l = (page + TARGET_PAGE_SIZE) - addr;
1828 if (l > len)
1829 l = len;
1830 flags = page_get_flags(page);
1831 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00001832 return -1;
bellard13eb76e2004-01-24 15:23:36 +00001833 if (is_write) {
1834 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00001835 return -1;
bellard579a97f2007-11-11 14:26:47 +00001836 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001837 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00001838 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001839 memcpy(p, buf, l);
1840 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00001841 } else {
1842 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00001843 return -1;
bellard579a97f2007-11-11 14:26:47 +00001844 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001845 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00001846 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001847 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00001848 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00001849 }
1850 len -= l;
1851 buf += l;
1852 addr += l;
1853 }
Paul Brooka68fe892010-03-01 00:08:59 +00001854 return 0;
bellard13eb76e2004-01-24 15:23:36 +00001855}
bellard8df1cd02005-01-28 22:37:22 +00001856
bellard13eb76e2004-01-24 15:23:36 +00001857#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001858
Avi Kivitya8170e52012-10-23 12:30:10 +02001859static void invalidate_and_set_dirty(hwaddr addr,
1860 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001861{
1862 if (!cpu_physical_memory_is_dirty(addr)) {
1863 /* invalidate code */
1864 tb_invalidate_phys_page_range(addr, addr + length, 0);
1865 /* set dirty bit */
1866 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1867 }
Anthony PERARDe2269392012-10-03 13:49:22 +00001868 xen_modified_memory(addr, length);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001869}
1870
Avi Kivitya8170e52012-10-23 12:30:10 +02001871void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02001872 int len, bool is_write)
bellard13eb76e2004-01-24 15:23:36 +00001873{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001874 hwaddr l;
bellard13eb76e2004-01-24 15:23:36 +00001875 uint8_t *ptr;
1876 uint32_t val;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001877 hwaddr addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02001878 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00001879
bellard13eb76e2004-01-24 15:23:36 +00001880 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001881 l = len;
1882 section = address_space_translate(as, addr, &addr1, &l, is_write);
ths3b46e622007-09-17 08:09:54 +00001883
bellard13eb76e2004-01-24 15:23:36 +00001884 if (is_write) {
Avi Kivityf3705d52012-03-08 16:16:34 +02001885 if (!memory_region_is_ram(section->mr)) {
bellard6a00d602005-11-21 23:25:50 +00001886 /* XXX: could force cpu_single_env to NULL to avoid
1887 potential bugs */
aurel326c2934d2009-02-18 21:37:17 +00001888 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00001889 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001890 val = ldl_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02001891 io_mem_write(section->mr, addr1, val, 4);
bellard13eb76e2004-01-24 15:23:36 +00001892 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00001893 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00001894 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001895 val = lduw_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02001896 io_mem_write(section->mr, addr1, val, 2);
bellard13eb76e2004-01-24 15:23:36 +00001897 l = 2;
1898 } else {
bellard1c213d12005-09-03 10:49:04 +00001899 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001900 val = ldub_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02001901 io_mem_write(section->mr, addr1, val, 1);
bellard13eb76e2004-01-24 15:23:36 +00001902 l = 1;
1903 }
Avi Kivityf3705d52012-03-08 16:16:34 +02001904 } else if (!section->readonly) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001905 addr1 += memory_region_get_ram_addr(section->mr);
bellard13eb76e2004-01-24 15:23:36 +00001906 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00001907 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00001908 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001909 invalidate_and_set_dirty(addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00001910 }
1911 } else {
Blue Swirlcc5bea62012-04-14 14:56:48 +00001912 if (!(memory_region_is_ram(section->mr) ||
1913 memory_region_is_romd(section->mr))) {
bellard13eb76e2004-01-24 15:23:36 +00001914 /* I/O case */
aurel326c2934d2009-02-18 21:37:17 +00001915 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00001916 /* 32 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02001917 val = io_mem_read(section->mr, addr1, 4);
bellardc27004e2005-01-03 23:35:10 +00001918 stl_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00001919 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00001920 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00001921 /* 16 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02001922 val = io_mem_read(section->mr, addr1, 2);
bellardc27004e2005-01-03 23:35:10 +00001923 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00001924 l = 2;
1925 } else {
bellard1c213d12005-09-03 10:49:04 +00001926 /* 8 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02001927 val = io_mem_read(section->mr, addr1, 1);
bellardc27004e2005-01-03 23:35:10 +00001928 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00001929 l = 1;
1930 }
1931 } else {
1932 /* RAM case */
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001933 ptr = qemu_get_ram_ptr(section->mr->ram_addr + addr1);
Avi Kivityf3705d52012-03-08 16:16:34 +02001934 memcpy(buf, ptr, l);
bellard13eb76e2004-01-24 15:23:36 +00001935 }
1936 }
1937 len -= l;
1938 buf += l;
1939 addr += l;
1940 }
1941}
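
/* Sketch of a typical caller (hypothetical device fetching a descriptor
 * from guest memory):
 *
 *     uint8_t desc[16];
 *     address_space_rw(&address_space_memory, desc_addr,
 *                      desc, sizeof(desc), false);
 */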
bellard8df1cd02005-01-28 22:37:22 +00001942
Avi Kivitya8170e52012-10-23 12:30:10 +02001943void address_space_write(AddressSpace *as, hwaddr addr,
Avi Kivityac1970f2012-10-03 16:22:53 +02001944 const uint8_t *buf, int len)
1945{
1946 address_space_rw(as, addr, (uint8_t *)buf, len, true);
1947}
1948
1949/**
1950 * address_space_read: read from an address space.
1951 *
1952 * @as: #AddressSpace to be accessed
1953 * @addr: address within that address space
1954 * @buf: buffer with the data transferred
1955 */
Avi Kivitya8170e52012-10-23 12:30:10 +02001956void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02001957{
1958 address_space_rw(as, addr, buf, len, false);
1959}
1960
1961
Avi Kivitya8170e52012-10-23 12:30:10 +02001962void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02001963 int len, int is_write)
1964{
1965 return address_space_rw(&address_space_memory, addr, buf, len, is_write);
1966}
1967
bellardd0ecd2a2006-04-23 17:14:48 +00001968/* used for ROM loading : can write in RAM and ROM */
Avi Kivitya8170e52012-10-23 12:30:10 +02001969void cpu_physical_memory_write_rom(hwaddr addr,
bellardd0ecd2a2006-04-23 17:14:48 +00001970 const uint8_t *buf, int len)
1971{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001972 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00001973 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001974 hwaddr addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02001975 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00001976
bellardd0ecd2a2006-04-23 17:14:48 +00001977 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001978 l = len;
1979 section = address_space_translate(&address_space_memory,
1980 addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00001981
Blue Swirlcc5bea62012-04-14 14:56:48 +00001982 if (!(memory_region_is_ram(section->mr) ||
1983 memory_region_is_romd(section->mr))) {
bellardd0ecd2a2006-04-23 17:14:48 +00001984 /* do nothing */
1985 } else {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001986 addr1 += memory_region_get_ram_addr(section->mr);
bellardd0ecd2a2006-04-23 17:14:48 +00001987 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00001988 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00001989 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001990 invalidate_and_set_dirty(addr1, l);
bellardd0ecd2a2006-04-23 17:14:48 +00001991 }
1992 len -= l;
1993 buf += l;
1994 addr += l;
1995 }
1996}
1997
aliguori6d16c2f2009-01-22 16:59:11 +00001998typedef struct {
1999 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002000 hwaddr addr;
2001 hwaddr len;
aliguori6d16c2f2009-01-22 16:59:11 +00002002} BounceBuffer;
2003
2004static BounceBuffer bounce;
2005
aliguoriba223c22009-01-22 16:59:16 +00002006typedef struct MapClient {
2007 void *opaque;
2008 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00002009 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002010} MapClient;
2011
Blue Swirl72cf2d42009-09-12 07:36:22 +00002012static QLIST_HEAD(map_client_list, MapClient) map_client_list
2013 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002014
2015void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2016{
Anthony Liguori7267c092011-08-20 22:09:37 -05002017 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00002018
2019 client->opaque = opaque;
2020 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002021 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00002022 return client;
2023}
2024
Blue Swirl8b9c99d2012-10-28 11:04:51 +00002025static void cpu_unregister_map_client(void *_client)
aliguoriba223c22009-01-22 16:59:16 +00002026{
2027 MapClient *client = (MapClient *)_client;
2028
Blue Swirl72cf2d42009-09-12 07:36:22 +00002029 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002030 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002031}
2032
2033static void cpu_notify_map_clients(void)
2034{
2035 MapClient *client;
2036
Blue Swirl72cf2d42009-09-12 07:36:22 +00002037 while (!QLIST_EMPTY(&map_client_list)) {
2038 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002039 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09002040 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00002041 }
2042}
2043
aliguori6d16c2f2009-01-22 16:59:11 +00002044/* Map a physical memory region into a host virtual address.
2045 * May map a subset of the requested range, given by and returned in *plen.
2046 * May return NULL if resources needed to perform the mapping are exhausted.
2047 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002048 * Use cpu_register_map_client() to know when retrying the map operation is
2049 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002050 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002051void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002052 hwaddr addr,
2053 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002054 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002055{
Avi Kivitya8170e52012-10-23 12:30:10 +02002056 hwaddr len = *plen;
2057 hwaddr todo = 0;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002058 hwaddr l, xlat;
Avi Kivityf3705d52012-03-08 16:16:34 +02002059 MemoryRegionSection *section;
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002060 ram_addr_t raddr = RAM_ADDR_MAX;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002061 ram_addr_t rlen;
2062 void *ret;
aliguori6d16c2f2009-01-22 16:59:11 +00002063
2064 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002065 l = len;
2066 section = address_space_translate(as, addr, &xlat, &l, is_write);
aliguori6d16c2f2009-01-22 16:59:11 +00002067
Avi Kivityf3705d52012-03-08 16:16:34 +02002068 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002069 if (todo || bounce.buffer) {
aliguori6d16c2f2009-01-22 16:59:11 +00002070 break;
2071 }
2072 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2073 bounce.addr = addr;
2074 bounce.len = l;
2075 if (!is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002076 address_space_read(as, addr, bounce.buffer, l);
aliguori6d16c2f2009-01-22 16:59:11 +00002077 }
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002078
2079 *plen = l;
2080 return bounce.buffer;
aliguori6d16c2f2009-01-22 16:59:11 +00002081 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002082 if (!todo) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002083 raddr = memory_region_get_ram_addr(section->mr) + xlat;
2084 } else {
2085 if (memory_region_get_ram_addr(section->mr) + xlat != raddr + todo) {
2086 break;
2087 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002088 }
aliguori6d16c2f2009-01-22 16:59:11 +00002089
2090 len -= l;
2091 addr += l;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002092 todo += l;
aliguori6d16c2f2009-01-22 16:59:11 +00002093 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002094 rlen = todo;
2095 ret = qemu_ram_ptr_length(raddr, &rlen);
2096 *plen = rlen;
2097 return ret;
aliguori6d16c2f2009-01-22 16:59:11 +00002098}
2099
Avi Kivityac1970f2012-10-03 16:22:53 +02002100/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002101 * Will also mark the memory as dirty if is_write == 1. access_len gives
2102 * the amount of memory that was actually read or written by the caller.
2103 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002104void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2105 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002106{
2107 if (buffer != bounce.buffer) {
2108 if (is_write) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03002109 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002110 while (access_len) {
2111 unsigned l;
2112 l = TARGET_PAGE_SIZE;
2113 if (l > access_len)
2114 l = access_len;
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002115 invalidate_and_set_dirty(addr1, l);
aliguori6d16c2f2009-01-22 16:59:11 +00002116 addr1 += l;
2117 access_len -= l;
2118 }
2119 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002120 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002121 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002122 }
aliguori6d16c2f2009-01-22 16:59:11 +00002123 return;
2124 }
2125 if (is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002126 address_space_write(as, bounce.addr, bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002127 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002128 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002129 bounce.buffer = NULL;
aliguoriba223c22009-01-22 16:59:16 +00002130 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002131}
bellardd0ecd2a2006-04-23 17:14:48 +00002132
Avi Kivitya8170e52012-10-23 12:30:10 +02002133void *cpu_physical_memory_map(hwaddr addr,
2134 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002135 int is_write)
2136{
2137 return address_space_map(&address_space_memory, addr, plen, is_write);
2138}
2139
Avi Kivitya8170e52012-10-23 12:30:10 +02002140void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2141 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002142{
2143 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2144}
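
/* The map/unmap pair is meant to be used as follows (sketch; the map may
 * fall back to the single bounce buffer for MMIO and can fail, so always
 * check for NULL and re-check the returned length):
 *
 *     hwaddr plen = size;
 *     void *p = cpu_physical_memory_map(addr, &plen, 1);
 *     if (p) {
 *         ... access up to plen bytes ...
 *         cpu_physical_memory_unmap(p, plen, 1, plen);
 *     }
 */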
2145
bellard8df1cd02005-01-28 22:37:22 +00002146/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002147static inline uint32_t ldl_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002148 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002149{
bellard8df1cd02005-01-28 22:37:22 +00002150 uint8_t *ptr;
2151 uint32_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02002152 MemoryRegionSection *section;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002153 hwaddr l = 4;
2154 hwaddr addr1;
bellard8df1cd02005-01-28 22:37:22 +00002155
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002156 section = address_space_translate(&address_space_memory, addr, &addr1, &l,
2157 false);
2158 if (l < 4 ||
2159 !(memory_region_is_ram(section->mr) ||
Blue Swirlcc5bea62012-04-14 14:56:48 +00002160 memory_region_is_romd(section->mr))) {
bellard8df1cd02005-01-28 22:37:22 +00002161 /* I/O case */
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002162 val = io_mem_read(section->mr, addr1, 4);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002163#if defined(TARGET_WORDS_BIGENDIAN)
2164 if (endian == DEVICE_LITTLE_ENDIAN) {
2165 val = bswap32(val);
2166 }
2167#else
2168 if (endian == DEVICE_BIG_ENDIAN) {
2169 val = bswap32(val);
2170 }
2171#endif
bellard8df1cd02005-01-28 22:37:22 +00002172 } else {
2173 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02002174 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002175 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002176 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002177 switch (endian) {
2178 case DEVICE_LITTLE_ENDIAN:
2179 val = ldl_le_p(ptr);
2180 break;
2181 case DEVICE_BIG_ENDIAN:
2182 val = ldl_be_p(ptr);
2183 break;
2184 default:
2185 val = ldl_p(ptr);
2186 break;
2187 }
bellard8df1cd02005-01-28 22:37:22 +00002188 }
2189 return val;
2190}
2191
Avi Kivitya8170e52012-10-23 12:30:10 +02002192uint32_t ldl_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002193{
2194 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2195}
2196
Avi Kivitya8170e52012-10-23 12:30:10 +02002197uint32_t ldl_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002198{
2199 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2200}
2201
Avi Kivitya8170e52012-10-23 12:30:10 +02002202uint32_t ldl_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002203{
2204 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2205}
2206
bellard84b7b8e2005-11-28 21:19:04 +00002207/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002208static inline uint64_t ldq_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002209 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00002210{
bellard84b7b8e2005-11-28 21:19:04 +00002211 uint8_t *ptr;
2212 uint64_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02002213 MemoryRegionSection *section;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002214 hwaddr l = 8;
2215 hwaddr addr1;
bellard84b7b8e2005-11-28 21:19:04 +00002216
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002217 section = address_space_translate(&address_space_memory, addr, &addr1, &l,
2218 false);
2219 if (l < 8 ||
2220 !(memory_region_is_ram(section->mr) ||
Blue Swirlcc5bea62012-04-14 14:56:48 +00002221 memory_region_is_romd(section->mr))) {
bellard84b7b8e2005-11-28 21:19:04 +00002222 /* I/O case */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002223
2224 /* XXX This is broken when device endian != cpu endian.
2225 Fix and add "endian" variable check */
bellard84b7b8e2005-11-28 21:19:04 +00002226#ifdef TARGET_WORDS_BIGENDIAN
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002227 val = io_mem_read(section->mr, addr1, 4) << 32;
2228 val |= io_mem_read(section->mr, addr1 + 4, 4);
bellard84b7b8e2005-11-28 21:19:04 +00002229#else
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002230 val = io_mem_read(section->mr, addr1, 4);
2231 val |= io_mem_read(section->mr, addr1 + 4, 4) << 32;
bellard84b7b8e2005-11-28 21:19:04 +00002232#endif
2233 } else {
2234 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02002235 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002236 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002237 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002238 switch (endian) {
2239 case DEVICE_LITTLE_ENDIAN:
2240 val = ldq_le_p(ptr);
2241 break;
2242 case DEVICE_BIG_ENDIAN:
2243 val = ldq_be_p(ptr);
2244 break;
2245 default:
2246 val = ldq_p(ptr);
2247 break;
2248 }
bellard84b7b8e2005-11-28 21:19:04 +00002249 }
2250 return val;
2251}
2252
Avi Kivitya8170e52012-10-23 12:30:10 +02002253uint64_t ldq_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002254{
2255 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2256}
2257
Avi Kivitya8170e52012-10-23 12:30:10 +02002258uint64_t ldq_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002259{
2260 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2261}
2262
Avi Kivitya8170e52012-10-23 12:30:10 +02002263uint64_t ldq_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002264{
2265 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2266}
2267
bellardaab33092005-10-30 20:48:42 +00002268/* XXX: optimize */
Avi Kivitya8170e52012-10-23 12:30:10 +02002269uint32_t ldub_phys(hwaddr addr)
bellardaab33092005-10-30 20:48:42 +00002270{
2271 uint8_t val;
2272 cpu_physical_memory_read(addr, &val, 1);
2273 return val;
2274}
2275
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002276/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002277static inline uint32_t lduw_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002278 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00002279{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002280 uint8_t *ptr;
2281 uint64_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02002282 MemoryRegionSection *section;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002283 hwaddr l = 2;
2284 hwaddr addr1;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002285
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002286 section = address_space_translate(&address_space_memory, addr, &addr1, &l,
2287 false);
2288 if (l < 2 ||
2289 !(memory_region_is_ram(section->mr) ||
Blue Swirlcc5bea62012-04-14 14:56:48 +00002290 memory_region_is_romd(section->mr))) {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002291 /* I/O case */
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002292 val = io_mem_read(section->mr, addr1, 2);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002293#if defined(TARGET_WORDS_BIGENDIAN)
2294 if (endian == DEVICE_LITTLE_ENDIAN) {
2295 val = bswap16(val);
2296 }
2297#else
2298 if (endian == DEVICE_BIG_ENDIAN) {
2299 val = bswap16(val);
2300 }
2301#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002302 } else {
2303 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02002304 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002305 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002306 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002307 switch (endian) {
2308 case DEVICE_LITTLE_ENDIAN:
2309 val = lduw_le_p(ptr);
2310 break;
2311 case DEVICE_BIG_ENDIAN:
2312 val = lduw_be_p(ptr);
2313 break;
2314 default:
2315 val = lduw_p(ptr);
2316 break;
2317 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002318 }
2319 return val;
bellardaab33092005-10-30 20:48:42 +00002320}
2321
Avi Kivitya8170e52012-10-23 12:30:10 +02002322uint32_t lduw_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002323{
2324 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2325}
2326
Avi Kivitya8170e52012-10-23 12:30:10 +02002327uint32_t lduw_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002328{
2329 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2330}
2331
Avi Kivitya8170e52012-10-23 12:30:10 +02002332uint32_t lduw_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002333{
2334 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2335}
2336
bellard8df1cd02005-01-28 22:37:22 +00002337/* warning: addr must be aligned. The ram page is not masked as dirty
2338 and the code inside is not invalidated. It is useful if the dirty
2339 bits are used to track modified PTEs */
void stl_phys_notdirty(hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;
    hwaddr l = 4;
    hwaddr addr1;

    section = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                      true);
    if (l < 4 || !memory_region_is_ram(section->mr) || section->readonly) {
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
        io_mem_write(section->mr, addr1, val, 4);
    } else {
        addr1 += memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
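
/*
 * Hypothetical usage sketch (not part of the original file): the comment
 * above motivates stl_phys_notdirty() with PTE tracking, so a target's
 * MMU emulation could update a flag in a guest page table like this
 * without disturbing the dirty bitmap.  The example_ name and the PTE
 * bit layout are made up for illustration.
 */
static inline void example_set_pte_accessed(hwaddr pte_addr, uint32_t pte)
{
    /* pte_addr must be 4-byte aligned; bit 5 is a made-up accessed flag */
    stl_phys_notdirty(pte_addr, pte | (1u << 5));
}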

/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;
    hwaddr l = 4;
    hwaddr addr1;

    section = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                      true);
    if (l < 4 || !memory_region_is_ram(section->mr) || section->readonly) {
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(section->mr, addr1, val, 4);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
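
/*
 * Hypothetical usage sketch (not part of the original file): a device
 * with little-endian registers uses the _le_ variant so the byte order
 * in guest memory stays fixed even on big-endian targets, where
 * stl_phys() would lay the bytes out the other way.  The names are
 * illustrative.
 */
static inline void example_ring_doorbell(hwaddr doorbell_addr, uint32_t tail)
{
    stl_le_phys(doorbell_addr, tail);
}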

/* XXX: optimize */
void stb_phys(hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;
    hwaddr l = 2;
    hwaddr addr1;

    section = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                      true);
    if (l < 2 || !memory_region_is_ram(section->mr) || section->readonly) {
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(section->mr, addr1, val, 2);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
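
/*
 * Hypothetical usage sketch (not part of the original file): the 64-bit
 * stores above byte-swap first and then go through
 * cpu_physical_memory_write(), so they also cope with I/O regions and
 * page-crossing addresses.  Publishing a descriptor address
 * little-endian, for instance (names illustrative):
 */
static inline void example_publish_desc_addr(hwaddr slot_addr, uint64_t desc)
{
    stq_le_phys(slot_addr, desc);
}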

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        } else {
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
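
/*
 * Hypothetical usage sketch (not part of the original file): a
 * gdbstub-style read of guest virtual memory.  cpu_memory_rw_debug()
 * translates page by page via cpu_get_phys_page_debug(), so the range
 * may span several non-contiguous physical pages.  The example_ name
 * is illustrative.
 */
static inline int example_debug_read(CPUArchState *env, target_ulong vaddr,
                                     uint8_t *buf, int len)
{
    /* returns 0 on success, -1 if any page in the range is unmapped */
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0);
}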
#endif

#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegionSection *section;
    hwaddr l = 1;

    section = address_space_translate(&address_space_memory,
                                      phys_addr, &phys_addr, &l, false);

    return !(memory_region_is_ram(section->mr) ||
             memory_region_is_romd(section->mr));
}
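
/*
 * Hypothetical usage sketch (not part of the original file): a guest
 * memory dumper skipping device regions, since reading MMIO for a dump
 * could trigger side effects.  The example_ name is illustrative.
 */
static inline bool example_page_is_dumpable(hwaddr phys_addr)
{
    return !cpu_physical_memory_is_io(phys_addr);
}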
#endif