/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;
DMAContext dma_context_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned, io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    MemoryListener listener;
};

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

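/* Fill [*index, *index + *nb) pages of the radix tree rooted at *lp with
 * the section number 'leaf'.  At each level a span that covers a whole
 * aligned 'step'-sized block is stored as a single leaf entry; smaller
 * spans recurse one level down.  Missing intermediate nodes are allocated
 * on demand, with bottom-level entries initialized to the unassigned
 * section.
 */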
static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

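/* Look up the section covering a page index by walking the radix tree.
 * Hitting a NIL pointer on the way down means nothing was registered
 * there, so the unassigned section is returned instead.
 */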
static MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &phys_sections[phys_section_unassigned];
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }
    return &phys_sections[lp.ptr];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

static MemoryRegionSection *address_space_lookup_region(AddressSpace *as,
                                                        hwaddr addr)
{
    return phys_page_find(as->dispatch, addr >> TARGET_PAGE_BITS);
}

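/* Translate addr within as into a MemoryRegionSection, returning in *xlat
 * the offset into the section's MemoryRegion and clamping *plen so that
 * the range does not run past the end of that region.
 */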
MemoryRegionSection *address_space_translate(AddressSpace *as, hwaddr addr,
                                             hwaddr *xlat, hwaddr *plen,
                                             bool is_write)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(as, addr);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#else
#define vmstate_cpu_common vmstate_dummy
#endif

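/* Return the CPU with the given index by scanning the global CPU list,
 * or NULL if no such CPU exists.
 */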
CPUState *qemu_get_cpu(int index)
{
    CPUArchState *env = first_cpu;
    CPUState *cpu = NULL;

    while (env) {
        cpu = ENV_GET_CPU(env);
        if (cpu->cpu_index == index) {
            break;
        }
        env = env->next_cpu;
    }

    return env ? cpu : NULL;
}

void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data)
{
    CPUArchState *env = first_cpu;

    while (env) {
        func(ENV_GET_CPU(env), data);
        env = env->next_cpu;
    }
}

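/* Register a newly created CPU: append it to the global CPU list, assign
 * it the next free cpu_index, and wire up the common (and, if the CPU
 * class provides one, per-class) vmstate for migration.
 */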
void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
                            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

void cpu_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->exit_request = 1;
    cpu->tcg_exit_req = 1;
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining. */
    new_env->next_cpu = next_cpu;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        iotlb = section - phys_sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

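/* Subpages implement mappings at sub-page granularity: a subpage_t covers
 * one target page and records a section number for each byte offset
 * within that page.
 */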
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(hwaddr base);

static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(AddressSpaceDispatch *d)
{
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(phys_sections_nb < TARGET_PAGE_SIZE);

    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}

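/* Install a section smaller than a page into the dispatch map.  If the
 * page has no subpage container yet, allocate one and point the page's
 * leaf entry at it, then register the section's byte range inside it.
 */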
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}

static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    hwaddr addr;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    addr = start_addr;
    phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}

QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > MAX_PHYS_ADDR_SPACE_BITS)

static MemoryRegionSection limit(MemoryRegionSection section)
{
    section.size = MIN(section.offset_within_address_space + section.size,
                       MAX_PHYS_ADDR + 1)
                   - section.offset_within_address_space;

    return section;
}

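/* MemoryListener 'region_add' callback: split the new section into an
 * unaligned head, a page-aligned middle and an unaligned tail.  Head and
 * tail (and pages whose offset within the region is not page-aligned) go
 * through subpages; the aligned middle is registered page-wise.
 */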
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
    MemoryRegionSection now = limit(*section), remain = limit(*section);

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(d, &now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    while (remain.size >= TARGET_PAGE_SIZE) {
        now = remain;
        if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = TARGET_PAGE_SIZE;
            register_subpage(d, &now);
        } else {
            now.size &= TARGET_PAGE_MASK;
            register_multipage(d, &now);
        }
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(d, &now);
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

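/* Back a RAMBlock with hugetlbfs: create and immediately unlink a
 * temporary file under 'path', round the size up to a hugepage multiple,
 * and mmap it.  Returns NULL if the path is not on hugetlbfs or the block
 * is smaller than one hugepage, letting the caller fall back to an
 * anonymous mapping.
 */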
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    block->fd = fd;
    return area;
}
#endif

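/* Choose a ram_addr_t offset for a new block by best-fit: scan the gaps
 * between existing blocks and pick the smallest one that still fits
 * 'size'.
 */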
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

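/* Allocate a new RAMBlock of 'size' bytes (rounded up to the target page
 * size), backed by the caller-supplied pointer, a -mem-path file, Xen,
 * KVM, or an anonymous mapping.  The block is inserted into ram_list in
 * order of decreasing length, and its pages start out marked dirty.
 */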
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_anon_ram_alloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_ram_alloc(size);
            } else {
                new_block->host = qemu_anon_ram_alloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

Anthony Liguoric227f092009-10-01 16:12:16 -05001186void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00001187{
Alex Williamson04b16652010-07-02 11:13:17 -06001188 RAMBlock *block;
1189
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001190 /* This assumes the iothread lock is taken here too. */
1191 qemu_mutex_lock_ramlist();
Paolo Bonzinia3161032012-11-14 15:54:48 +01001192 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001193 if (addr == block->offset) {
Paolo Bonzinia3161032012-11-14 15:54:48 +01001194 QTAILQ_REMOVE(&ram_list.blocks, block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001195 ram_list.mru_block = NULL;
Umesh Deshpandef798b072011-08-18 11:41:17 -07001196 ram_list.version++;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001197 if (block->flags & RAM_PREALLOC_MASK) {
1198 ;
1199 } else if (mem_path) {
Alex Williamson04b16652010-07-02 11:13:17 -06001200#if defined (__linux__) && !defined(TARGET_S390X)
1201 if (block->fd) {
1202 munmap(block->host, block->length);
1203 close(block->fd);
1204 } else {
Paolo Bonzinie7a09b92013-05-13 16:19:56 +02001205 qemu_anon_ram_free(block->host, block->length);
Alex Williamson04b16652010-07-02 11:13:17 -06001206 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01001207#else
1208 abort();
Alex Williamson04b16652010-07-02 11:13:17 -06001209#endif
1210 } else {
Jan Kiszka868bb332011-06-21 22:59:09 +02001211 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001212 xen_invalidate_map_cache_entry(block->host);
Jun Nakajima432d2682010-08-31 16:41:25 +01001213 } else {
Paolo Bonzinie7a09b92013-05-13 16:19:56 +02001214 qemu_anon_ram_free(block->host, block->length);
Jun Nakajima432d2682010-08-31 16:41:25 +01001215 }
Alex Williamson04b16652010-07-02 11:13:17 -06001216 }
Anthony Liguori7267c092011-08-20 22:09:37 -05001217 g_free(block);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001218 break;
Alex Williamson04b16652010-07-02 11:13:17 -06001219 }
1220 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001221 qemu_mutex_unlock_ramlist();
Alex Williamson04b16652010-07-02 11:13:17 -06001222
bellarde9a1ab12007-02-08 23:08:38 +00001223}
1224
Huang Yingcd19cfa2011-03-02 08:56:19 +01001225#ifndef _WIN32
1226void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1227{
1228 RAMBlock *block;
1229 ram_addr_t offset;
1230 int flags;
1231 void *area, *vaddr;
1232
Paolo Bonzinia3161032012-11-14 15:54:48 +01001233 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001234 offset = addr - block->offset;
1235 if (offset < block->length) {
1236 vaddr = block->host + offset;
1237 if (block->flags & RAM_PREALLOC_MASK) {
1238 ;
1239 } else {
1240 flags = MAP_FIXED;
1241 munmap(vaddr, length);
1242 if (mem_path) {
1243#if defined(__linux__) && !defined(TARGET_S390X)
1244 if (block->fd) {
1245#ifdef MAP_POPULATE
1246 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1247 MAP_PRIVATE;
1248#else
1249 flags |= MAP_PRIVATE;
1250#endif
1251 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1252 flags, block->fd, offset);
1253 } else {
1254 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1255 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1256 flags, -1, 0);
1257 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01001258#else
1259 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001260#endif
1261 } else {
1262#if defined(TARGET_S390X) && defined(CONFIG_KVM)
1263 flags |= MAP_SHARED | MAP_ANONYMOUS;
1264 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
1265 flags, -1, 0);
1266#else
1267 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1268 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1269 flags, -1, 0);
1270#endif
1271 }
1272 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001273 fprintf(stderr, "Could not remap addr: "
1274 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001275 length, addr);
1276 exit(1);
1277 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001278 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001279 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001280 }
1281 return;
1282 }
1283 }
1284}
1285#endif /* !_WIN32 */
1286
pbrookdc828ca2009-04-09 22:21:07 +00001287/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00001288 With the exception of the softmmu code in this file, this should
1289 only be used for local memory (e.g. video ram) that the device owns,
1290 and knows it isn't going to access beyond the end of the block.
1291
1292 It should not be used for general purpose DMA.
1293 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1294 */
Anthony Liguoric227f092009-10-01 16:12:16 -05001295void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00001296{
pbrook94a6b542009-04-11 17:15:54 +00001297 RAMBlock *block;
1298
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001299 /* The list is protected by the iothread lock here. */
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001300 block = ram_list.mru_block;
1301 if (block && addr - block->offset < block->length) {
1302 goto found;
1303 }
Paolo Bonzinia3161032012-11-14 15:54:48 +01001304 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Alex Williamsonf471a172010-06-11 11:11:42 -06001305 if (addr - block->offset < block->length) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001306 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001307 }
pbrook94a6b542009-04-11 17:15:54 +00001308 }
Alex Williamsonf471a172010-06-11 11:11:42 -06001309
1310 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1311 abort();
1312
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001313found:
1314 ram_list.mru_block = block;
1315 if (xen_enabled()) {
1316 /* We need to check if the requested address is in the RAM
1317 * because we don't want to map the entire memory in QEMU.
1318 * In that case just map until the end of the page.
1319 */
1320 if (block->offset == 0) {
1321 return xen_map_cache(addr, 0, 0);
1322 } else if (block->host == NULL) {
1323 block->host =
1324 xen_map_cache(block->offset, block->length, 1);
1325 }
1326 }
1327 return block->host + (addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00001328}
1329
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001330/* Return a host pointer to ram allocated with qemu_ram_alloc. Same as
1331 * qemu_get_ram_ptr but do not touch ram_list.mru_block.
1332 *
1333 * ??? Is this still necessary?
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001334 */
Blue Swirl8b9c99d2012-10-28 11:04:51 +00001335static void *qemu_safe_ram_ptr(ram_addr_t addr)
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001336{
1337 RAMBlock *block;
1338
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001339 /* The list is protected by the iothread lock here. */
Paolo Bonzinia3161032012-11-14 15:54:48 +01001340 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001341 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02001342 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001343 /* We need to check if the requested address is in the RAM
1344 * because we don't want to map the entire memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001345 * In that case just map until the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01001346 */
1347 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001348 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01001349 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001350 block->host =
1351 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01001352 }
1353 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001354 return block->host + (addr - block->offset);
1355 }
1356 }
1357
1358 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1359 abort();
1360
1361 return NULL;
1362}
1363
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001364/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1365 * but takes a size argument */
Blue Swirl8b9c99d2012-10-28 11:04:51 +00001366static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001367{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001368 if (*size == 0) {
1369 return NULL;
1370 }
Jan Kiszka868bb332011-06-21 22:59:09 +02001371 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001372 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02001373 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001374 RAMBlock *block;
1375
Paolo Bonzinia3161032012-11-14 15:54:48 +01001376 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001377 if (addr - block->offset < block->length) {
1378 if (addr - block->offset + *size > block->length)
1379 *size = block->length - addr + block->offset;
1380 return block->host + (addr - block->offset);
1381 }
1382 }
1383
1384 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1385 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001386 }
1387}
1388
Marcelo Tosattie8902612010-10-11 15:31:19 -03001389int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00001390{
pbrook94a6b542009-04-11 17:15:54 +00001391 RAMBlock *block;
1392 uint8_t *host = ptr;
1393
Jan Kiszka868bb332011-06-21 22:59:09 +02001394 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001395 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001396 return 0;
1397 }
1398
Paolo Bonzinia3161032012-11-14 15:54:48 +01001399 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001400 /* This case can happen when the block is not mapped. */
1401 if (block->host == NULL) {
1402 continue;
1403 }
Alex Williamsonf471a172010-06-11 11:11:42 -06001404 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03001405 *ram_addr = block->offset + (host - block->host);
1406 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06001407 }
pbrook94a6b542009-04-11 17:15:54 +00001408 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001409
Marcelo Tosattie8902612010-10-11 15:31:19 -03001410 return -1;
1411}
Alex Williamsonf471a172010-06-11 11:11:42 -06001412
Marcelo Tosattie8902612010-10-11 15:31:19 -03001413/* Some of the softmmu routines need to translate from a host pointer
1414 (typically a TLB entry) back to a ram offset. */
1415ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
1416{
1417 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06001418
Marcelo Tosattie8902612010-10-11 15:31:19 -03001419 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
1420 fprintf(stderr, "Bad ram pointer %p\n", ptr);
1421 abort();
1422 }
1423 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00001424}
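
/*
 * A sketch of the round trip described above, under the assumption that
 * 'addr' names a valid offset inside some RAM block: a host pointer
 * obtained from qemu_get_ram_ptr() translates back to the same offset.
 */
static void example_host_ptr_round_trip(ram_addr_t addr)
{
    uint8_t *host = qemu_get_ram_ptr(addr);
    ram_addr_t back;

    if (qemu_ram_addr_from_host(host, &back) || back != addr) {
        abort();    /* RAM pointers must translate back exactly */
    }
}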
1425
Avi Kivitya8170e52012-10-23 12:30:10 +02001426static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001427 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001428{
bellard3a7d9292005-08-21 09:26:42 +00001429 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001430 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00001431 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001432 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001433 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00001434 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001435 switch (size) {
1436 case 1:
1437 stb_p(qemu_get_ram_ptr(ram_addr), val);
1438 break;
1439 case 2:
1440 stw_p(qemu_get_ram_ptr(ram_addr), val);
1441 break;
1442 case 4:
1443 stl_p(qemu_get_ram_ptr(ram_addr), val);
1444 break;
1445 default:
1446 abort();
1447 }
bellardf23db162005-08-21 19:12:28 +00001448 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001449 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00001450 /* we remove the notdirty callback only if the code has been
1451 flushed */
1452 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00001453 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00001454}
1455
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001456static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1457 unsigned size, bool is_write)
1458{
1459 return is_write;
1460}
1461
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001462static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001463 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001464 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001465 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00001466};
1467
pbrook0f459d12008-06-09 00:20:13 +00001468/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00001469static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00001470{
Andreas Färber9349b4f2012-03-14 01:38:32 +01001471 CPUArchState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00001472 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00001473 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00001474 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00001475 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00001476
aliguori06d55cc2008-11-18 20:24:06 +00001477 if (env->watchpoint_hit) {
1478 /* We re-entered the check after replacing the TB. Now raise
 1479 * the debug interrupt so that it will trigger after the
1480 * current instruction. */
Andreas Färberc3affe52013-01-18 15:03:43 +01001481 cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00001482 return;
1483 }
pbrook2e70f6e2008-06-29 01:03:05 +00001484 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00001485 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001486 if ((vaddr == (wp->vaddr & len_mask) ||
1487 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00001488 wp->flags |= BP_WATCHPOINT_HIT;
1489 if (!env->watchpoint_hit) {
1490 env->watchpoint_hit = wp;
Blue Swirl5a316522012-12-02 21:28:09 +00001491 tb_check_watchpoint(env);
aliguori6e140f22008-11-18 20:37:55 +00001492 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1493 env->exception_index = EXCP_DEBUG;
Max Filippov488d6572012-01-29 02:24:39 +04001494 cpu_loop_exit(env);
aliguori6e140f22008-11-18 20:37:55 +00001495 } else {
1496 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1497 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
Max Filippov488d6572012-01-29 02:24:39 +04001498 cpu_resume_from_signal(env, NULL);
aliguori6e140f22008-11-18 20:37:55 +00001499 }
aliguori06d55cc2008-11-18 20:24:06 +00001500 }
aliguori6e140f22008-11-18 20:37:55 +00001501 } else {
1502 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00001503 }
1504 }
1505}
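
/*
 * A sketch of how a debug front end might arm this machinery.  It
 * assumes cpu_watchpoint_insert() with the signature used below, which
 * is declared outside this file; the address and length are
 * hypothetical.
 */
static int example_arm_watchpoint(CPUArchState *env, target_ulong vaddr)
{
    CPUWatchpoint *wp;

    /* Trap writes to an aligned 4-byte word; hits are routed through
     * the watch_mem_ops handlers below into check_watchpoint(). */
    return cpu_watchpoint_insert(env, vaddr, 4, BP_MEM_WRITE, &wp);
}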
1506
pbrook6658ffb2007-03-16 23:58:11 +00001507/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1508 so these check for a hit then pass through to the normal out-of-line
1509 phys routines. */
Avi Kivitya8170e52012-10-23 12:30:10 +02001510static uint64_t watch_mem_read(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001511 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001512{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001513 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1514 switch (size) {
1515 case 1: return ldub_phys(addr);
1516 case 2: return lduw_phys(addr);
1517 case 4: return ldl_phys(addr);
1518 default: abort();
1519 }
pbrook6658ffb2007-03-16 23:58:11 +00001520}
1521
Avi Kivitya8170e52012-10-23 12:30:10 +02001522static void watch_mem_write(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001523 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001524{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001525 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1526 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04001527 case 1:
1528 stb_phys(addr, val);
1529 break;
1530 case 2:
1531 stw_phys(addr, val);
1532 break;
1533 case 4:
1534 stl_phys(addr, val);
1535 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02001536 default: abort();
1537 }
pbrook6658ffb2007-03-16 23:58:11 +00001538}
1539
Avi Kivity1ec9b902012-01-02 12:47:48 +02001540static const MemoryRegionOps watch_mem_ops = {
1541 .read = watch_mem_read,
1542 .write = watch_mem_write,
1543 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00001544};
pbrook6658ffb2007-03-16 23:58:11 +00001545
Avi Kivitya8170e52012-10-23 12:30:10 +02001546static uint64_t subpage_read(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001547 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001548{
Avi Kivity70c68e42012-01-02 12:32:48 +02001549 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07001550 unsigned int idx = SUBPAGE_IDX(addr);
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001551 uint64_t val;
1552
Avi Kivity5312bd82012-02-12 18:32:55 +02001553 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00001554#if defined(DEBUG_SUBPAGE)
1555 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
1556 mmio, len, addr, idx);
1557#endif
blueswir1db7b5422007-05-26 17:36:03 +00001558
Avi Kivity5312bd82012-02-12 18:32:55 +02001559 section = &phys_sections[mmio->sub_section[idx]];
1560 addr += mmio->base;
1561 addr -= section->offset_within_address_space;
1562 addr += section->offset_within_region;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001563 io_mem_read(section->mr, addr, &val, len);
1564 return val;
blueswir1db7b5422007-05-26 17:36:03 +00001565}
1566
Avi Kivitya8170e52012-10-23 12:30:10 +02001567static void subpage_write(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001568 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001569{
Avi Kivity70c68e42012-01-02 12:32:48 +02001570 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07001571 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02001572 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00001573#if defined(DEBUG_SUBPAGE)
Avi Kivity70c68e42012-01-02 12:32:48 +02001574 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
1575 " idx %d value %"PRIx64"\n",
Richard Hendersonf6405242010-04-22 16:47:31 -07001576 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00001577#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07001578
Avi Kivity5312bd82012-02-12 18:32:55 +02001579 section = &phys_sections[mmio->sub_section[idx]];
1580 addr += mmio->base;
1581 addr -= section->offset_within_address_space;
1582 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02001583 io_mem_write(section->mr, addr, value, len);
blueswir1db7b5422007-05-26 17:36:03 +00001584}
1585
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001586static bool subpage_accepts(void *opaque, hwaddr addr,
1587 unsigned size, bool is_write)
1588{
1589 subpage_t *mmio = opaque;
1590 unsigned int idx = SUBPAGE_IDX(addr);
1591 MemoryRegionSection *section;
1592#if defined(DEBUG_SUBPAGE)
1593 printf("%s: subpage %p %c len %d addr " TARGET_FMT_plx
1594 " idx %d\n", __func__, mmio,
 1595 is_write ? 'w' : 'r', size, addr, idx);
1596#endif
1597
1598 section = &phys_sections[mmio->sub_section[idx]];
1599 addr += mmio->base;
1600 addr -= section->offset_within_address_space;
1601 addr += section->offset_within_region;
1602 return memory_region_access_valid(section->mr, addr, size, is_write);
1603}
1604
Avi Kivity70c68e42012-01-02 12:32:48 +02001605static const MemoryRegionOps subpage_ops = {
1606 .read = subpage_read,
1607 .write = subpage_write,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001608 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02001609 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00001610};
1611
Avi Kivitya8170e52012-10-23 12:30:10 +02001612static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
Avi Kivityde712f92012-01-02 12:41:07 +02001613 unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01001614{
1615 ram_addr_t raddr = addr;
1616 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02001617 switch (size) {
1618 case 1: return ldub_p(ptr);
1619 case 2: return lduw_p(ptr);
1620 case 4: return ldl_p(ptr);
1621 default: abort();
1622 }
Andreas Färber56384e82011-11-30 16:26:21 +01001623}
1624
Avi Kivitya8170e52012-10-23 12:30:10 +02001625static void subpage_ram_write(void *opaque, hwaddr addr,
Avi Kivityde712f92012-01-02 12:41:07 +02001626 uint64_t value, unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01001627{
1628 ram_addr_t raddr = addr;
1629 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02001630 switch (size) {
1631 case 1: return stb_p(ptr, value);
1632 case 2: return stw_p(ptr, value);
1633 case 4: return stl_p(ptr, value);
1634 default: abort();
1635 }
Andreas Färber56384e82011-11-30 16:26:21 +01001636}
1637
Avi Kivityde712f92012-01-02 12:41:07 +02001638static const MemoryRegionOps subpage_ram_ops = {
1639 .read = subpage_ram_read,
1640 .write = subpage_ram_write,
1641 .endianness = DEVICE_NATIVE_ENDIAN,
Andreas Färber56384e82011-11-30 16:26:21 +01001642};
1643
Anthony Liguoric227f092009-10-01 16:12:16 -05001644static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02001645 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00001646{
1647 int idx, eidx;
1648
1649 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1650 return -1;
1651 idx = SUBPAGE_IDX(start);
1652 eidx = SUBPAGE_IDX(end);
1653#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00001654 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", __func__,
blueswir1db7b5422007-05-26 17:36:03 +00001655 mmio, start, end, idx, eidx, section);
1656#endif
Avi Kivity5312bd82012-02-12 18:32:55 +02001657 if (memory_region_is_ram(phys_sections[section].mr)) {
1658 MemoryRegionSection new_section = phys_sections[section];
1659 new_section.mr = &io_mem_subpage_ram;
1660 section = phys_section_add(&new_section);
Andreas Färber56384e82011-11-30 16:26:21 +01001661 }
blueswir1db7b5422007-05-26 17:36:03 +00001662 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02001663 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00001664 }
1665
1666 return 0;
1667}
1668
Avi Kivitya8170e52012-10-23 12:30:10 +02001669static subpage_t *subpage_init(hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00001670{
Anthony Liguoric227f092009-10-01 16:12:16 -05001671 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00001672
Anthony Liguori7267c092011-08-20 22:09:37 -05001673 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00001674
1675 mmio->base = base;
Avi Kivity70c68e42012-01-02 12:32:48 +02001676 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
1677 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02001678 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00001679#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00001680 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
 1681 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00001682#endif
Avi Kivity0f0cb162012-02-13 17:14:32 +02001683 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
blueswir1db7b5422007-05-26 17:36:03 +00001684
1685 return mmio;
1686}
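
/*
 * A sketch, with hypothetical section indices, of how the two helpers
 * above cooperate: one target page split between two
 * MemoryRegionSections, e.g. RAM in the first half and device MMIO in
 * the second.
 */
static void example_split_page(hwaddr base, uint16_t ram_sec, uint16_t mmio_sec)
{
    subpage_t *mmio = subpage_init(base);

    subpage_register(mmio, 0, TARGET_PAGE_SIZE / 2 - 1, ram_sec);
    subpage_register(mmio, TARGET_PAGE_SIZE / 2, TARGET_PAGE_SIZE - 1,
                     mmio_sec);
}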
1687
Avi Kivity5312bd82012-02-12 18:32:55 +02001688static uint16_t dummy_section(MemoryRegion *mr)
1689{
1690 MemoryRegionSection section = {
1691 .mr = mr,
1692 .offset_within_address_space = 0,
1693 .offset_within_region = 0,
1694 .size = UINT64_MAX,
1695 };
1696
1697 return phys_section_add(&section);
1698}
1699
Avi Kivitya8170e52012-10-23 12:30:10 +02001700MemoryRegion *iotlb_to_region(hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02001701{
Avi Kivity37ec01d2012-03-08 18:08:35 +02001702 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02001703}
1704
Avi Kivitye9179ce2009-06-14 11:38:52 +03001705static void io_mem_init(void)
1706{
Paolo Bonzinibf8d5162013-05-24 14:39:13 +02001707 memory_region_init_io(&io_mem_rom, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001708 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
1709 "unassigned", UINT64_MAX);
1710 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
1711 "notdirty", UINT64_MAX);
Avi Kivityde712f92012-01-02 12:41:07 +02001712 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
1713 "subpage-ram", UINT64_MAX);
Avi Kivity1ec9b902012-01-02 12:47:48 +02001714 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
1715 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03001716}
1717
Avi Kivityac1970f2012-10-03 16:22:53 +02001718static void mem_begin(MemoryListener *listener)
1719{
1720 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
1721
1722 destroy_all_mappings(d);
1723 d->phys_map.ptr = PHYS_MAP_NODE_NIL;
1724}
1725
Avi Kivity50c1e142012-02-08 21:36:02 +02001726static void core_begin(MemoryListener *listener)
1727{
Avi Kivity5312bd82012-02-12 18:32:55 +02001728 phys_sections_clear();
1729 phys_section_unassigned = dummy_section(&io_mem_unassigned);
Avi Kivityaa102232012-03-08 17:06:55 +02001730 phys_section_notdirty = dummy_section(&io_mem_notdirty);
1731 phys_section_rom = dummy_section(&io_mem_rom);
1732 phys_section_watch = dummy_section(&io_mem_watch);
Avi Kivity50c1e142012-02-08 21:36:02 +02001733}
1734
Avi Kivity1d711482012-10-02 18:54:45 +02001735static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02001736{
Andreas Färber9349b4f2012-03-14 01:38:32 +01001737 CPUArchState *env;
Avi Kivity117712c2012-02-12 21:23:17 +02001738
1739 /* since each CPU stores ram addresses in its TLB cache, we must
1740 reset the modified entries */
1741 /* XXX: slow ! */
1742 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1743 tlb_flush(env, 1);
1744 }
Avi Kivity50c1e142012-02-08 21:36:02 +02001745}
1746
Avi Kivity93632742012-02-08 16:54:16 +02001747static void core_log_global_start(MemoryListener *listener)
1748{
1749 cpu_physical_memory_set_dirty_tracking(1);
1750}
1751
1752static void core_log_global_stop(MemoryListener *listener)
1753{
1754 cpu_physical_memory_set_dirty_tracking(0);
1755}
1756
Avi Kivity4855d412012-02-08 21:16:05 +02001757static void io_region_add(MemoryListener *listener,
1758 MemoryRegionSection *section)
1759{
Avi Kivitya2d33522012-03-05 17:40:12 +02001760 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
1761
1762 mrio->mr = section->mr;
1763 mrio->offset = section->offset_within_region;
1764 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
Avi Kivity4855d412012-02-08 21:16:05 +02001765 section->offset_within_address_space, section->size);
Avi Kivitya2d33522012-03-05 17:40:12 +02001766 ioport_register(&mrio->iorange);
Avi Kivity4855d412012-02-08 21:16:05 +02001767}
1768
1769static void io_region_del(MemoryListener *listener,
1770 MemoryRegionSection *section)
1771{
1772 isa_unassign_ioport(section->offset_within_address_space, section->size);
1773}
1774
Avi Kivity93632742012-02-08 16:54:16 +02001775static MemoryListener core_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02001776 .begin = core_begin,
Avi Kivity93632742012-02-08 16:54:16 +02001777 .log_global_start = core_log_global_start,
1778 .log_global_stop = core_log_global_stop,
Avi Kivityac1970f2012-10-03 16:22:53 +02001779 .priority = 1,
Avi Kivity93632742012-02-08 16:54:16 +02001780};
1781
Avi Kivity4855d412012-02-08 21:16:05 +02001782static MemoryListener io_memory_listener = {
1783 .region_add = io_region_add,
1784 .region_del = io_region_del,
Avi Kivity4855d412012-02-08 21:16:05 +02001785 .priority = 0,
1786};
1787
Avi Kivity1d711482012-10-02 18:54:45 +02001788static MemoryListener tcg_memory_listener = {
1789 .commit = tcg_commit,
1790};
1791
Avi Kivityac1970f2012-10-03 16:22:53 +02001792void address_space_init_dispatch(AddressSpace *as)
1793{
1794 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1795
1796 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1797 d->listener = (MemoryListener) {
1798 .begin = mem_begin,
1799 .region_add = mem_add,
1800 .region_nop = mem_add,
1801 .priority = 0,
1802 };
1803 as->dispatch = d;
1804 memory_listener_register(&d->listener, as);
1805}
1806
Avi Kivity83f3c252012-10-07 12:59:55 +02001807void address_space_destroy_dispatch(AddressSpace *as)
1808{
1809 AddressSpaceDispatch *d = as->dispatch;
1810
1811 memory_listener_unregister(&d->listener);
1812 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
1813 g_free(d);
1814 as->dispatch = NULL;
1815}
1816
Avi Kivity62152b82011-07-26 14:26:14 +03001817static void memory_map_init(void)
1818{
Anthony Liguori7267c092011-08-20 22:09:37 -05001819 system_memory = g_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03001820 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity2673a5d2012-10-02 18:49:28 +02001821 address_space_init(&address_space_memory, system_memory);
1822 address_space_memory.name = "memory";
Avi Kivity309cb472011-08-08 16:09:03 +03001823
Anthony Liguori7267c092011-08-20 22:09:37 -05001824 system_io = g_malloc(sizeof(*system_io));
Avi Kivity309cb472011-08-08 16:09:03 +03001825 memory_region_init(system_io, "io", 65536);
Avi Kivity2673a5d2012-10-02 18:49:28 +02001826 address_space_init(&address_space_io, system_io);
1827 address_space_io.name = "I/O";
Avi Kivity93632742012-02-08 16:54:16 +02001828
Avi Kivityf6790af2012-10-02 20:13:51 +02001829 memory_listener_register(&core_memory_listener, &address_space_memory);
1830 memory_listener_register(&io_memory_listener, &address_space_io);
1831 memory_listener_register(&tcg_memory_listener, &address_space_memory);
Peter Maydell9e119082012-10-29 11:34:32 +10001832
1833 dma_context_init(&dma_context_memory, &address_space_memory,
1834 NULL, NULL, NULL);
Avi Kivity62152b82011-07-26 14:26:14 +03001835}
1836
1837MemoryRegion *get_system_memory(void)
1838{
1839 return system_memory;
1840}
1841
Avi Kivity309cb472011-08-08 16:09:03 +03001842MemoryRegion *get_system_io(void)
1843{
1844 return system_io;
1845}
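
/*
 * A sketch of typical board wiring on top of the two accessors above.
 * It assumes memory_region_init_ram() and memory_region_add_subregion()
 * from the memory core, with the signatures shown; the name, size and
 * offset are made up.
 */
static void example_board_init(void)
{
    MemoryRegion *ram = g_malloc(sizeof(*ram));

    memory_region_init_ram(ram, "board.ram", 128 * 1024 * 1024);
    memory_region_add_subregion(get_system_memory(), 0, ram);
}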
1846
pbrooke2eef172008-06-08 01:09:01 +00001847#endif /* !defined(CONFIG_USER_ONLY) */
1848
bellard13eb76e2004-01-24 15:23:36 +00001849/* physical memory access (slow version, mainly for debug) */
1850#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01001851int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00001852 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00001853{
1854 int l, flags;
1855 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00001856 void * p;
bellard13eb76e2004-01-24 15:23:36 +00001857
1858 while (len > 0) {
1859 page = addr & TARGET_PAGE_MASK;
1860 l = (page + TARGET_PAGE_SIZE) - addr;
1861 if (l > len)
1862 l = len;
1863 flags = page_get_flags(page);
1864 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00001865 return -1;
bellard13eb76e2004-01-24 15:23:36 +00001866 if (is_write) {
1867 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00001868 return -1;
bellard579a97f2007-11-11 14:26:47 +00001869 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001870 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00001871 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001872 memcpy(p, buf, l);
1873 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00001874 } else {
1875 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00001876 return -1;
bellard579a97f2007-11-11 14:26:47 +00001877 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001878 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00001879 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001880 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00001881 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00001882 }
1883 len -= l;
1884 buf += l;
1885 addr += l;
1886 }
Paul Brooka68fe892010-03-01 00:08:59 +00001887 return 0;
bellard13eb76e2004-01-24 15:23:36 +00001888}
bellard8df1cd02005-01-28 22:37:22 +00001889
bellard13eb76e2004-01-24 15:23:36 +00001890#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001891
Avi Kivitya8170e52012-10-23 12:30:10 +02001892static void invalidate_and_set_dirty(hwaddr addr,
1893 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001894{
1895 if (!cpu_physical_memory_is_dirty(addr)) {
1896 /* invalidate code */
1897 tb_invalidate_phys_page_range(addr, addr + length, 0);
1898 /* set dirty bit */
1899 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1900 }
Anthony PERARDe2269392012-10-03 13:49:22 +00001901 xen_modified_memory(addr, length);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001902}
1903
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02001904static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
1905{
1906 if (memory_region_is_ram(mr)) {
1907 return !(is_write && mr->readonly);
1908 }
1909 if (memory_region_is_romd(mr)) {
1910 return !is_write;
1911 }
1912
1913 return false;
1914}
1915
Paolo Bonzini82f25632013-05-24 11:59:43 +02001916static inline int memory_access_size(int l, hwaddr addr)
1917{
1918 if (l >= 4 && ((addr & 3) == 0)) {
1919 return 4;
1920 }
1921 if (l >= 2 && ((addr & 1) == 0)) {
1922 return 2;
1923 }
1924 return 1;
1925}
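
/*
 * Worked example: a 6-byte transfer at address 0x1002 is split by the
 * helper above into a 2-byte access at 0x1002 (the address is only
 * 2-byte aligned) followed by a 4-byte access at 0x1004.
 */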
1926
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001927bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02001928 int len, bool is_write)
bellard13eb76e2004-01-24 15:23:36 +00001929{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001930 hwaddr l;
bellard13eb76e2004-01-24 15:23:36 +00001931 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001932 uint64_t val;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001933 hwaddr addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02001934 MemoryRegionSection *section;
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001935 bool error = false;
ths3b46e622007-09-17 08:09:54 +00001936
bellard13eb76e2004-01-24 15:23:36 +00001937 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001938 l = len;
1939 section = address_space_translate(as, addr, &addr1, &l, is_write);
ths3b46e622007-09-17 08:09:54 +00001940
bellard13eb76e2004-01-24 15:23:36 +00001941 if (is_write) {
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02001942 if (!memory_access_is_direct(section->mr, is_write)) {
Paolo Bonzini82f25632013-05-24 11:59:43 +02001943 l = memory_access_size(l, addr1);
bellard6a00d602005-11-21 23:25:50 +00001944 /* XXX: could force cpu_single_env to NULL to avoid
1945 potential bugs */
Paolo Bonzini82f25632013-05-24 11:59:43 +02001946 if (l == 4) {
bellard1c213d12005-09-03 10:49:04 +00001947 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001948 val = ldl_p(buf);
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001949 error |= io_mem_write(section->mr, addr1, val, 4);
Paolo Bonzini82f25632013-05-24 11:59:43 +02001950 } else if (l == 2) {
bellard1c213d12005-09-03 10:49:04 +00001951 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001952 val = lduw_p(buf);
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001953 error |= io_mem_write(section->mr, addr1, val, 2);
bellard13eb76e2004-01-24 15:23:36 +00001954 } else {
bellard1c213d12005-09-03 10:49:04 +00001955 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001956 val = ldub_p(buf);
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001957 error |= io_mem_write(section->mr, addr1, val, 1);
bellard13eb76e2004-01-24 15:23:36 +00001958 }
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02001959 } else {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001960 addr1 += memory_region_get_ram_addr(section->mr);
bellard13eb76e2004-01-24 15:23:36 +00001961 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00001962 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00001963 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001964 invalidate_and_set_dirty(addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00001965 }
1966 } else {
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02001967 if (!memory_access_is_direct(section->mr, is_write)) {
bellard13eb76e2004-01-24 15:23:36 +00001968 /* I/O case */
Paolo Bonzini82f25632013-05-24 11:59:43 +02001969 l = memory_access_size(l, addr1);
1970 if (l == 4) {
bellard13eb76e2004-01-24 15:23:36 +00001971 /* 32 bit read access */
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001972 error |= io_mem_read(section->mr, addr1, &val, 4);
bellardc27004e2005-01-03 23:35:10 +00001973 stl_p(buf, val);
Paolo Bonzini82f25632013-05-24 11:59:43 +02001974 } else if (l == 2) {
bellard13eb76e2004-01-24 15:23:36 +00001975 /* 16 bit read access */
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001976 error |= io_mem_read(section->mr, addr1, &val, 2);
bellardc27004e2005-01-03 23:35:10 +00001977 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00001978 } else {
bellard1c213d12005-09-03 10:49:04 +00001979 /* 8 bit read access */
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001980 error |= io_mem_read(section->mr, addr1, &val, 1);
bellardc27004e2005-01-03 23:35:10 +00001981 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00001982 }
1983 } else {
1984 /* RAM case */
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001985 ptr = qemu_get_ram_ptr(section->mr->ram_addr + addr1);
Avi Kivityf3705d52012-03-08 16:16:34 +02001986 memcpy(buf, ptr, l);
bellard13eb76e2004-01-24 15:23:36 +00001987 }
1988 }
1989 len -= l;
1990 buf += l;
1991 addr += l;
1992 }
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001993
1994 return error;
bellard13eb76e2004-01-24 15:23:36 +00001995}
bellard8df1cd02005-01-28 22:37:22 +00001996
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001997bool address_space_write(AddressSpace *as, hwaddr addr,
Avi Kivityac1970f2012-10-03 16:22:53 +02001998 const uint8_t *buf, int len)
1999{
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002000 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
Avi Kivityac1970f2012-10-03 16:22:53 +02002001}
2002
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002003bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002004{
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002005 return address_space_rw(as, addr, buf, len, false);
Avi Kivityac1970f2012-10-03 16:22:53 +02002006}
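
/*
 * A usage sketch with a hypothetical guest address: the three entry
 * points above return true when some part of the transfer hit a region
 * that rejected the access.
 */
static bool example_read_word(AddressSpace *as, hwaddr addr, uint32_t *out)
{
    uint8_t buf[4];

    if (address_space_read(as, addr, buf, sizeof(buf))) {
        return false;               /* access failed */
    }
    *out = ldl_p(buf);              /* target-native byte order */
    return true;
}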
2007
2008
Avi Kivitya8170e52012-10-23 12:30:10 +02002009void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002010 int len, int is_write)
2011{
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002012 address_space_rw(&address_space_memory, addr, buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002013}
2014
bellardd0ecd2a2006-04-23 17:14:48 +00002015/* used for ROM loading: can write in RAM and ROM */
Avi Kivitya8170e52012-10-23 12:30:10 +02002016void cpu_physical_memory_write_rom(hwaddr addr,
bellardd0ecd2a2006-04-23 17:14:48 +00002017 const uint8_t *buf, int len)
2018{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002019 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002020 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002021 hwaddr addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02002022 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00002023
bellardd0ecd2a2006-04-23 17:14:48 +00002024 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002025 l = len;
2026 section = address_space_translate(&address_space_memory,
2027 addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002028
Blue Swirlcc5bea62012-04-14 14:56:48 +00002029 if (!(memory_region_is_ram(section->mr) ||
2030 memory_region_is_romd(section->mr))) {
bellardd0ecd2a2006-04-23 17:14:48 +00002031 /* do nothing */
2032 } else {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002033 addr1 += memory_region_get_ram_addr(section->mr);
bellardd0ecd2a2006-04-23 17:14:48 +00002034 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002035 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002036 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002037 invalidate_and_set_dirty(addr1, l);
bellardd0ecd2a2006-04-23 17:14:48 +00002038 }
2039 len -= l;
2040 buf += l;
2041 addr += l;
2042 }
2043}
2044
aliguori6d16c2f2009-01-22 16:59:11 +00002045typedef struct {
2046 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002047 hwaddr addr;
2048 hwaddr len;
aliguori6d16c2f2009-01-22 16:59:11 +00002049} BounceBuffer;
2050
2051static BounceBuffer bounce;
2052
aliguoriba223c22009-01-22 16:59:16 +00002053typedef struct MapClient {
2054 void *opaque;
2055 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00002056 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002057} MapClient;
2058
Blue Swirl72cf2d42009-09-12 07:36:22 +00002059static QLIST_HEAD(map_client_list, MapClient) map_client_list
2060 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002061
2062void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2063{
Anthony Liguori7267c092011-08-20 22:09:37 -05002064 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00002065
2066 client->opaque = opaque;
2067 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002068 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00002069 return client;
2070}
2071
Blue Swirl8b9c99d2012-10-28 11:04:51 +00002072static void cpu_unregister_map_client(void *_client)
aliguoriba223c22009-01-22 16:59:16 +00002073{
2074 MapClient *client = (MapClient *)_client;
2075
Blue Swirl72cf2d42009-09-12 07:36:22 +00002076 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002077 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002078}
2079
2080static void cpu_notify_map_clients(void)
2081{
2082 MapClient *client;
2083
Blue Swirl72cf2d42009-09-12 07:36:22 +00002084 while (!QLIST_EMPTY(&map_client_list)) {
2085 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002086 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09002087 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00002088 }
2089}
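
/*
 * A sketch of the retry protocol this list implements: a caller whose
 * address_space_map() returned NULL because the bounce buffer was busy
 * registers a callback and retries from there.  The callback body is
 * hypothetical.
 */
static void example_bounce_retry(void *opaque)
{
    /* Runs from cpu_notify_map_clients() once bounce.buffer is free;
     * a real client would re-issue its address_space_map() here. */
}

static void example_wait_for_bounce(void *opaque)
{
    cpu_register_map_client(opaque, example_bounce_retry);
}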
2090
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002091bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2092{
2093 MemoryRegionSection *section;
2094 hwaddr l, xlat;
2095
2096 while (len > 0) {
2097 l = len;
2098 section = address_space_translate(as, addr, &xlat, &l, is_write);
2099 if (!memory_access_is_direct(section->mr, is_write)) {
2100 l = memory_access_size(l, addr);
2101 if (!memory_region_access_valid(section->mr, xlat, l, is_write)) {
2102 return false;
2103 }
2104 }
2105
2106 len -= l;
2107 addr += l;
2108 }
2109 return true;
2110}
2111
aliguori6d16c2f2009-01-22 16:59:11 +00002112/* Map a physical memory region into a host virtual address.
2113 * May map a subset of the requested range, given by and returned in *plen.
2114 * May return NULL if resources needed to perform the mapping are exhausted.
2115 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002116 * Use cpu_register_map_client() to know when retrying the map operation is
2117 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002118 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002119void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002120 hwaddr addr,
2121 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002122 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002123{
Avi Kivitya8170e52012-10-23 12:30:10 +02002124 hwaddr len = *plen;
2125 hwaddr todo = 0;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002126 hwaddr l, xlat;
Avi Kivityf3705d52012-03-08 16:16:34 +02002127 MemoryRegionSection *section;
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002128 ram_addr_t raddr = RAM_ADDR_MAX;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002129 ram_addr_t rlen;
2130 void *ret;
aliguori6d16c2f2009-01-22 16:59:11 +00002131
2132 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002133 l = len;
2134 section = address_space_translate(as, addr, &xlat, &l, is_write);
aliguori6d16c2f2009-01-22 16:59:11 +00002135
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02002136 if (!memory_access_is_direct(section->mr, is_write)) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002137 if (todo || bounce.buffer) {
aliguori6d16c2f2009-01-22 16:59:11 +00002138 break;
2139 }
2140 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2141 bounce.addr = addr;
2142 bounce.len = l;
2143 if (!is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002144 address_space_read(as, addr, bounce.buffer, l);
aliguori6d16c2f2009-01-22 16:59:11 +00002145 }
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002146
2147 *plen = l;
2148 return bounce.buffer;
aliguori6d16c2f2009-01-22 16:59:11 +00002149 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002150 if (!todo) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002151 raddr = memory_region_get_ram_addr(section->mr) + xlat;
2152 } else {
2153 if (memory_region_get_ram_addr(section->mr) + xlat != raddr + todo) {
2154 break;
2155 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002156 }
aliguori6d16c2f2009-01-22 16:59:11 +00002157
2158 len -= l;
2159 addr += l;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002160 todo += l;
aliguori6d16c2f2009-01-22 16:59:11 +00002161 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002162 rlen = todo;
2163 ret = qemu_ram_ptr_length(raddr, &rlen);
2164 *plen = rlen;
2165 return ret;
aliguori6d16c2f2009-01-22 16:59:11 +00002166}
2167
Avi Kivityac1970f2012-10-03 16:22:53 +02002168/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002169 * Will also mark the memory as dirty if is_write == 1. access_len gives
2170 * the amount of memory that was actually read or written by the caller.
2171 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002172void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2173 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002174{
2175 if (buffer != bounce.buffer) {
2176 if (is_write) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03002177 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002178 while (access_len) {
2179 unsigned l;
2180 l = TARGET_PAGE_SIZE;
2181 if (l > access_len)
2182 l = access_len;
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002183 invalidate_and_set_dirty(addr1, l);
aliguori6d16c2f2009-01-22 16:59:11 +00002184 addr1 += l;
2185 access_len -= l;
2186 }
2187 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002188 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002189 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002190 }
aliguori6d16c2f2009-01-22 16:59:11 +00002191 return;
2192 }
2193 if (is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002194 address_space_write(as, bounce.addr, bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002195 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002196 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002197 bounce.buffer = NULL;
aliguoriba223c22009-01-22 16:59:16 +00002198 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002199}
bellardd0ecd2a2006-04-23 17:14:48 +00002200
Avi Kivitya8170e52012-10-23 12:30:10 +02002201void *cpu_physical_memory_map(hwaddr addr,
2202 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002203 int is_write)
2204{
2205 return address_space_map(&address_space_memory, addr, plen, is_write);
2206}
2207
Avi Kivitya8170e52012-10-23 12:30:10 +02002208void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2209 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002210{
2211 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2212}
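
/*
 * A sketch of the map/unmap pattern prescribed above: a hypothetical
 * device DMA read from guest memory.  Note that the mapping may cover
 * less than the requested length.
 */
static void example_dma_read(hwaddr addr, uint8_t *dst, hwaddr len)
{
    hwaddr plen = len;
    void *p = cpu_physical_memory_map(addr, &plen, 0 /* read */);

    if (p) {
        memcpy(dst, p, plen);       /* plen may be shorter than len */
        cpu_physical_memory_unmap(p, plen, 0, plen);
    }
}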
2213
bellard8df1cd02005-01-28 22:37:22 +00002214/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002215static inline uint32_t ldl_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002216 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002217{
bellard8df1cd02005-01-28 22:37:22 +00002218 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002219 uint64_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02002220 MemoryRegionSection *section;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002221 hwaddr l = 4;
2222 hwaddr addr1;
bellard8df1cd02005-01-28 22:37:22 +00002223
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002224 section = address_space_translate(&address_space_memory, addr, &addr1, &l,
2225 false);
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02002226 if (l < 4 || !memory_access_is_direct(section->mr, false)) {
bellard8df1cd02005-01-28 22:37:22 +00002227 /* I/O case */
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002228 io_mem_read(section->mr, addr1, &val, 4);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002229#if defined(TARGET_WORDS_BIGENDIAN)
2230 if (endian == DEVICE_LITTLE_ENDIAN) {
2231 val = bswap32(val);
2232 }
2233#else
2234 if (endian == DEVICE_BIG_ENDIAN) {
2235 val = bswap32(val);
2236 }
2237#endif
bellard8df1cd02005-01-28 22:37:22 +00002238 } else {
2239 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02002240 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002241 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002242 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002243 switch (endian) {
2244 case DEVICE_LITTLE_ENDIAN:
2245 val = ldl_le_p(ptr);
2246 break;
2247 case DEVICE_BIG_ENDIAN:
2248 val = ldl_be_p(ptr);
2249 break;
2250 default:
2251 val = ldl_p(ptr);
2252 break;
2253 }
bellard8df1cd02005-01-28 22:37:22 +00002254 }
2255 return val;
2256}
2257
Avi Kivitya8170e52012-10-23 12:30:10 +02002258uint32_t ldl_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002259{
2260 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2261}
2262
Avi Kivitya8170e52012-10-23 12:30:10 +02002263uint32_t ldl_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002264{
2265 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2266}
2267
Avi Kivitya8170e52012-10-23 12:30:10 +02002268uint32_t ldl_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002269{
2270 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2271}
2272
bellard84b7b8e2005-11-28 21:19:04 +00002273/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002274static inline uint64_t ldq_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002275 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00002276{
bellard84b7b8e2005-11-28 21:19:04 +00002277 uint8_t *ptr;
2278 uint64_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02002279 MemoryRegionSection *section;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002280 hwaddr l = 8;
2281 hwaddr addr1;
bellard84b7b8e2005-11-28 21:19:04 +00002282
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002283 section = address_space_translate(&address_space_memory, addr, &addr1, &l,
2284 false);
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02002285 if (l < 8 || !memory_access_is_direct(section->mr, false)) {
bellard84b7b8e2005-11-28 21:19:04 +00002286 /* I/O case */
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002287 io_mem_read(section->mr, addr1, &val, 8);
Paolo Bonzini968a5622013-05-24 17:58:37 +02002288#if defined(TARGET_WORDS_BIGENDIAN)
2289 if (endian == DEVICE_LITTLE_ENDIAN) {
2290 val = bswap64(val);
2291 }
2292#else
2293 if (endian == DEVICE_BIG_ENDIAN) {
2294 val = bswap64(val);
2295 }
2296#endif
bellard84b7b8e2005-11-28 21:19:04 +00002297 } else {
2298 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02002299 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002300 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002301 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002302 switch (endian) {
2303 case DEVICE_LITTLE_ENDIAN:
2304 val = ldq_le_p(ptr);
2305 break;
2306 case DEVICE_BIG_ENDIAN:
2307 val = ldq_be_p(ptr);
2308 break;
2309 default:
2310 val = ldq_p(ptr);
2311 break;
2312 }
bellard84b7b8e2005-11-28 21:19:04 +00002313 }
2314 return val;
2315}
2316
Avi Kivitya8170e52012-10-23 12:30:10 +02002317uint64_t ldq_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002318{
2319 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2320}
2321
Avi Kivitya8170e52012-10-23 12:30:10 +02002322uint64_t ldq_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002323{
2324 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2325}
2326
Avi Kivitya8170e52012-10-23 12:30:10 +02002327uint64_t ldq_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002328{
2329 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2330}
2331
bellardaab33092005-10-30 20:48:42 +00002332/* XXX: optimize */
Avi Kivitya8170e52012-10-23 12:30:10 +02002333uint32_t ldub_phys(hwaddr addr)
bellardaab33092005-10-30 20:48:42 +00002334{
2335 uint8_t val;
2336 cpu_physical_memory_read(addr, &val, 1);
2337 return val;
2338}
2339
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002340/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002341static inline uint32_t lduw_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002342 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00002343{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002344 uint8_t *ptr;
2345 uint64_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02002346 MemoryRegionSection *section;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002347 hwaddr l = 2;
2348 hwaddr addr1;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002349
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002350 section = address_space_translate(&address_space_memory, addr, &addr1, &l,
2351 false);
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02002352 if (l < 2 || !memory_access_is_direct(section->mr, false)) {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002353 /* I/O case */
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002354 io_mem_read(section->mr, addr1, &val, 2);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002355#if defined(TARGET_WORDS_BIGENDIAN)
2356 if (endian == DEVICE_LITTLE_ENDIAN) {
2357 val = bswap16(val);
2358 }
2359#else
2360 if (endian == DEVICE_BIG_ENDIAN) {
2361 val = bswap16(val);
2362 }
2363#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002364 } else {
2365 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02002366 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002367 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002368 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002369 switch (endian) {
2370 case DEVICE_LITTLE_ENDIAN:
2371 val = lduw_le_p(ptr);
2372 break;
2373 case DEVICE_BIG_ENDIAN:
2374 val = lduw_be_p(ptr);
2375 break;
2376 default:
2377 val = lduw_p(ptr);
2378 break;
2379 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002380 }
2381 return val;
bellardaab33092005-10-30 20:48:42 +00002382}
2383
Avi Kivitya8170e52012-10-23 12:30:10 +02002384uint32_t lduw_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002385{
2386 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2387}
2388
Avi Kivitya8170e52012-10-23 12:30:10 +02002389uint32_t lduw_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002390{
2391 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2392}
2393
Avi Kivitya8170e52012-10-23 12:30:10 +02002394uint32_t lduw_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002395{
2396 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2397}
2398
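/* Editor's sketch, not in the original file: lduw_phys and friends
 * require a naturally aligned address (see the warnings above).  A
 * possibly unaligned guest address can instead go through the generic
 * byte-wise path: */
static inline uint32_t example_lduw_le_unaligned(hwaddr addr)
{
    uint8_t buf[2];

    cpu_physical_memory_read(addr, buf, 2);
    return lduw_le_p(buf);   /* assemble the bytes as little endian */
}
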
bellard8df1cd02005-01-28 22:37:22 +00002399 /* warning: addr must be aligned. The RAM page is not marked as dirty
 2400 and the code inside is not invalidated. This is useful when the dirty
 2401 bits are used to track modified guest PTEs. */
Avi Kivitya8170e52012-10-23 12:30:10 +02002402void stl_phys_notdirty(hwaddr addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00002403{
bellard8df1cd02005-01-28 22:37:22 +00002404 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02002405 MemoryRegionSection *section;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002406 hwaddr l = 4;
2407 hwaddr addr1;
bellard8df1cd02005-01-28 22:37:22 +00002408
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002409 section = address_space_translate(&address_space_memory, addr, &addr1, &l,
2410 true);
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02002411 if (l < 4 || !memory_access_is_direct(section->mr, true)) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002412 io_mem_write(section->mr, addr1, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00002413 } else {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002414 addr1 += memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00002415 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00002416 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00002417
2418 if (unlikely(in_migration)) {
2419 if (!cpu_physical_memory_is_dirty(addr1)) {
2420 /* invalidate code */
2421 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2422 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002423 cpu_physical_memory_set_dirty_flags(
2424 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori74576192008-10-06 14:02:03 +00002425 }
2426 }
bellard8df1cd02005-01-28 22:37:22 +00002427 }
2428}
2429
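/* Editor's sketch, not in the original file; the PTE layout is
 * hypothetical.  This is the kind of caller the comment above has in
 * mind: a softmmu page-table walker setting an "accessed" bit in a
 * guest PTE, without invalidating translated code that may live on
 * the same physical page. */
#define EXAMPLE_PTE_ACCESSED 0x20   /* hypothetical accessed bit */
static inline void example_pte_set_accessed(hwaddr pte_pa)
{
    uint32_t pte = ldl_phys(pte_pa);

    stl_phys_notdirty(pte_pa, pte | EXAMPLE_PTE_ACCESSED);
}
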
2430/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002431static inline void stl_phys_internal(hwaddr addr, uint32_t val,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002432 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002433{
bellard8df1cd02005-01-28 22:37:22 +00002434 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02002435 MemoryRegionSection *section;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002436 hwaddr l = 4;
2437 hwaddr addr1;
bellard8df1cd02005-01-28 22:37:22 +00002438
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002439 section = address_space_translate(&address_space_memory, addr, &addr1, &l,
2440 true);
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02002441 if (l < 4 || !memory_access_is_direct(section->mr, true)) {
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002442#if defined(TARGET_WORDS_BIGENDIAN)
2443 if (endian == DEVICE_LITTLE_ENDIAN) {
2444 val = bswap32(val);
2445 }
2446#else
2447 if (endian == DEVICE_BIG_ENDIAN) {
2448 val = bswap32(val);
2449 }
2450#endif
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002451 io_mem_write(section->mr, addr1, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00002452 } else {
bellard8df1cd02005-01-28 22:37:22 +00002453 /* RAM case */
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002454 addr1 += memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00002455 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002456 switch (endian) {
2457 case DEVICE_LITTLE_ENDIAN:
2458 stl_le_p(ptr, val);
2459 break;
2460 case DEVICE_BIG_ENDIAN:
2461 stl_be_p(ptr, val);
2462 break;
2463 default:
2464 stl_p(ptr, val);
2465 break;
2466 }
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002467 invalidate_and_set_dirty(addr1, 4);
bellard8df1cd02005-01-28 22:37:22 +00002468 }
2469}
2470
Avi Kivitya8170e52012-10-23 12:30:10 +02002471void stl_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002472{
2473 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2474}
2475
Avi Kivitya8170e52012-10-23 12:30:10 +02002476void stl_le_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002477{
2478 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2479}
2480
Avi Kivitya8170e52012-10-23 12:30:10 +02002481void stl_be_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002482{
2483 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2484}
2485
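/* Editor's note, not in the original file: in contrast to
 * stl_phys_notdirty above, these stores go through
 * invalidate_and_set_dirty(), so stale translated code covering the
 * written address is discarded.  A sketch of patching a guest
 * instruction in place (address and encoding hypothetical): */
static inline void example_patch_insn(hwaddr insn_pa, uint32_t new_insn)
{
    stl_phys(insn_pa, new_insn);
}
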
bellardaab33092005-10-30 20:48:42 +00002486/* XXX: optimize */
Avi Kivitya8170e52012-10-23 12:30:10 +02002487void stb_phys(hwaddr addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00002488{
2489 uint8_t v = val;
2490 cpu_physical_memory_write(addr, &v, 1);
2491}
2492
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002493/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002494static inline void stw_phys_internal(hwaddr addr, uint32_t val,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002495 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00002496{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002497 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02002498 MemoryRegionSection *section;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002499 hwaddr l = 2;
2500 hwaddr addr1;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002501
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002502 section = address_space_translate(&address_space_memory, addr, &addr1, &l,
2503 true);
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02002504 if (l < 2 || !memory_access_is_direct(section->mr, true)) {
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002505#if defined(TARGET_WORDS_BIGENDIAN)
2506 if (endian == DEVICE_LITTLE_ENDIAN) {
2507 val = bswap16(val);
2508 }
2509#else
2510 if (endian == DEVICE_BIG_ENDIAN) {
2511 val = bswap16(val);
2512 }
2513#endif
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002514 io_mem_write(section->mr, addr1, val, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002515 } else {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002516 /* RAM case */
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002517 addr1 += memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002518 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002519 switch (endian) {
2520 case DEVICE_LITTLE_ENDIAN:
2521 stw_le_p(ptr, val);
2522 break;
2523 case DEVICE_BIG_ENDIAN:
2524 stw_be_p(ptr, val);
2525 break;
2526 default:
2527 stw_p(ptr, val);
2528 break;
2529 }
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002530 invalidate_and_set_dirty(addr1, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002531 }
bellardaab33092005-10-30 20:48:42 +00002532}
2533
Avi Kivitya8170e52012-10-23 12:30:10 +02002534void stw_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002535{
2536 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2537}
2538
Avi Kivitya8170e52012-10-23 12:30:10 +02002539void stw_le_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002540{
2541 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2542}
2543
Avi Kivitya8170e52012-10-23 12:30:10 +02002544void stw_be_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002545{
2546 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2547}
2548
bellardaab33092005-10-30 20:48:42 +00002549/* XXX: optimize */
Avi Kivitya8170e52012-10-23 12:30:10 +02002550void stq_phys(hwaddr addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00002551{
2552 val = tswap64(val);
Stefan Weil71d2b722011-03-26 21:06:56 +01002553 cpu_physical_memory_write(addr, &val, 8);
bellardaab33092005-10-30 20:48:42 +00002554}
2555
Avi Kivitya8170e52012-10-23 12:30:10 +02002556void stq_le_phys(hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002557{
2558 val = cpu_to_le64(val);
2559 cpu_physical_memory_write(addr, &val, 8);
2560}
2561
Avi Kivitya8170e52012-10-23 12:30:10 +02002562void stq_be_phys(hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002563{
2564 val = cpu_to_be64(val);
2565 cpu_physical_memory_write(addr, &val, 8);
2566}
2567
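/* Editor's sketch, not in the original file (ring_base is a
 * hypothetical guest-physical address): storing a 64-bit descriptor
 * pointer in explicit little-endian order, as a PCI-style descriptor
 * table requires, independent of the target's byte order.  Note that
 * these 64-bit stores funnel through cpu_physical_memory_write
 * (hence the "XXX: optimize" above), so unlike the 16/32-bit fast
 * paths they also tolerate unaligned addresses. */
static inline void example_write_desc_ptr(hwaddr ring_base, int idx,
                                          uint64_t desc_pa)
{
    stq_le_phys(ring_base + (hwaddr)idx * 8, desc_pa);
}
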
aliguori5e2972f2009-03-28 17:51:36 +00002568/* virtual memory access for debug (includes writing to ROM) */
Andreas Färber9349b4f2012-03-14 01:38:32 +01002569int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00002570 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002571{
2572 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02002573 hwaddr phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00002574 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00002575
2576 while (len > 0) {
2577 page = addr & TARGET_PAGE_MASK;
2578 phys_addr = cpu_get_phys_page_debug(env, page);
2579 /* if no physical page mapped, return an error */
 2580         if (phys_addr == -1) {
 2581             return -1;
         }
 2582         l = (page + TARGET_PAGE_SIZE) - addr;
 2583         if (l > len) {
 2584             l = len;
         }
aliguori5e2972f2009-03-28 17:51:36 +00002585         phys_addr += (addr & ~TARGET_PAGE_MASK);
aliguori5e2972f2009-03-28 17:51:36 +00002586         if (is_write) {
 2587             cpu_physical_memory_write_rom(phys_addr, buf, l);
 2588         } else {
aliguori5e2972f2009-03-28 17:51:36 +00002589             cpu_physical_memory_rw(phys_addr, buf, l, is_write);
         }
bellard13eb76e2004-01-24 15:23:36 +00002590 len -= l;
2591 buf += l;
2592 addr += l;
2593 }
2594 return 0;
2595}
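
/* Editor's sketch, not in the original file: how a debugger front end
 * (e.g. the gdbstub) might peek a 32-bit value at a guest *virtual*
 * address, letting cpu_memory_rw_debug do the page-table walk.  The
 * bytes land in *out exactly as they appear in guest memory, with no
 * endian conversion. */
static inline int example_debug_peek32(CPUArchState *env, target_ulong va,
                                       uint32_t *out)
{
    return cpu_memory_rw_debug(env, va, (uint8_t *)out, 4, 0);
}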
Paul Brooka68fe892010-03-01 00:08:59 +00002596#endif
bellard13eb76e2004-01-24 15:23:36 +00002597
Blue Swirl8e4a4242013-01-06 18:30:17 +00002598#if !defined(CONFIG_USER_ONLY)
2599
2600/*
2601 * A helper function for the _utterly broken_ virtio device model to find
2602 * out if it's running on a big-endian machine. Don't do this at home, kids!
2603 */
2604bool virtio_is_big_endian(void);
2605bool virtio_is_big_endian(void)
2606{
2607#if defined(TARGET_WORDS_BIGENDIAN)
2608 return true;
2609#else
2610 return false;
2611#endif
2612}
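
/* Editor's sketch, not in the original file: what a legacy virtio
 * caller could do with the helper above -- store a 16-bit field in
 * the guest's native byte order (address hypothetical). */
static inline void example_virtio_stw(hwaddr addr, uint16_t v)
{
    if (virtio_is_big_endian()) {
        stw_be_phys(addr, v);
    } else {
        stw_le_phys(addr, v);
    }
}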
2613
2614#endif
2615
Wen Congyang76f35532012-05-07 12:04:18 +08002616#ifndef CONFIG_USER_ONLY
Avi Kivitya8170e52012-10-23 12:30:10 +02002617bool cpu_physical_memory_is_io(hwaddr phys_addr)
Wen Congyang76f35532012-05-07 12:04:18 +08002618{
2619 MemoryRegionSection *section;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002620 hwaddr l = 1;
Wen Congyang76f35532012-05-07 12:04:18 +08002621
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002622 section = address_space_translate(&address_space_memory,
2623 phys_addr, &phys_addr, &l, false);
Wen Congyang76f35532012-05-07 12:04:18 +08002624
2625 return !(memory_region_is_ram(section->mr) ||
2626 memory_region_is_romd(section->mr));
2627}
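
/* Editor's sketch, not in the original file: a guest-memory dumper
 * could use the predicate above to skip device MMIO while walking
 * guest-physical address space. */
static inline bool example_safe_to_dump(hwaddr pa)
{
    return !cpu_physical_memory_is_io(pa);
}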
2628#endif