/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;
DMAContext dma_context_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned, io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

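/*
 * The physical page map is a multi-level table.  Interior nodes are
 * arrays of L2_SIZE PhysPageEntry structs whose ptr field indexes
 * another node; leaf entries index the phys_sections array instead.
 * Nodes live in the flat phys_map_nodes array and are referenced by
 * index rather than by pointer, so a 16-bit ptr suffices and
 * PHYS_MAP_NODE_NIL acts as the "no node yet" sentinel.
 */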
static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

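/*
 * Walk the map for the page range (*index, *nb), allocating nodes on
 * demand, and point every covered entry at section `leaf`.  A chunk
 * that is step-aligned and at least step pages long becomes a single
 * leaf at this level; anything smaller recurses one level down.  New
 * level-0 nodes start out filled with phys_section_unassigned so
 * lookups of pages that are never set stay well defined.
 */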
static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

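/*
 * Look up one page: consume L2_BITS of the page index per level, for at
 * most P_L2_LEVELS levels.  Hitting a NIL entry means the page was
 * never mapped, so the unassigned section is returned rather than an
 * error.
 */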
static MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &phys_sections[phys_section_unassigned];
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }
    return &phys_sections[lp.ptr];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

static MemoryRegionSection *address_space_lookup_region(AddressSpace *as,
                                                        hwaddr addr)
{
    return phys_page_find(as->dispatch, addr >> TARGET_PAGE_BITS);
}

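/*
 * Translate an address-space offset to (section, offset): *xlat gets
 * the offset inside the returned section's MemoryRegion, and *plen is
 * clamped so the access cannot run past the end of that region.  A
 * hypothetical caller, for illustration only:
 *
 *     hwaddr xlat, len = 4;
 *     MemoryRegionSection *s =
 *         address_space_translate(&address_space_memory, addr,
 *                                 &xlat, &len, false);
 */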
MemoryRegionSection *address_space_translate(AddressSpace *as, hwaddr addr,
                                             hwaddr *xlat, hwaddr *plen,
                                             bool is_write)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(as, addr);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#else
#define vmstate_cpu_common vmstate_dummy
#endif

CPUState *qemu_get_cpu(int index)
{
    CPUArchState *env = first_cpu;
    CPUState *cpu = NULL;

    while (env) {
        cpu = ENV_GET_CPU(env);
        if (cpu->cpu_index == index) {
            break;
        }
        env = env->next_cpu;
    }

    return env ? cpu : NULL;
}

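/*
 * Apply func to every CPU on the first_cpu list.  Illustrative
 * (hypothetical) use, counting CPUs through the data pointer:
 *
 *     static void count_one(CPUState *cpu, void *data)
 *     {
 *         (*(int *)data)++;
 *     }
 *
 *     int n = 0;
 *     qemu_for_each_cpu(count_one, &n);
 */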
void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data)
{
    CPUArchState *env = first_cpu;

    while (env) {
        func(ENV_GET_CPU(env), data);
        env = env->next_cpu;
    }
}

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
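
/*
 * Note: len must be a power of two no larger than TARGET_PAGE_SIZE and
 * addr must be len-aligned; len_mask above encodes both checks.  An
 * illustrative (hypothetical) call watching one 4-byte word for writes:
 *
 *     CPUWatchpoint *wp;
 *     cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE | BP_GDB, &wp);
 */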

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

void cpu_exit(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    cpu->exit_request = 1;
    cpu->tcg_exit_req = 1;
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining. */
    new_env->next_cpu = next_cpu;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

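/*
 * Compute the value stored in a TLB entry's iotlb field.  For RAM the
 * page-aligned ram_addr is ORed with a small section number
 * (phys_section_notdirty or phys_section_rom); phys_section_add()
 * asserts the section count stays below TARGET_PAGE_SIZE precisely so
 * this OR cannot spill into the address bits.  For MMIO the section
 * index plus the in-page offset is stored instead.  Pages with an
 * armed watchpoint are redirected to phys_section_watch and marked
 * TLB_MMIO so every access takes the slow path.
 */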
hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        iotlb = section - phys_sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

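/*
 * Subpages cover guest pages that are shared by more than one memory
 * region (regions not aligned to TARGET_PAGE_SIZE).  A subpage_t stands
 * in for one such page and maps each byte offset within it, via
 * sub_section and SUBPAGE_IDX, to its own MemoryRegionSection.
 */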
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(hwaddr base);
static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(AddressSpaceDispatch *d)
{
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(phys_sections_nb < TARGET_PAGE_SIZE);

    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}

static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    hwaddr addr;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    addr = start_addr;
    phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}

QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > MAX_PHYS_ADDR_SPACE_BITS)

static MemoryRegionSection limit(MemoryRegionSection section)
{
    section.size = MIN(section.offset_within_address_space + section.size,
                       MAX_PHYS_ADDR + 1)
        - section.offset_within_address_space;

    return section;
}

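/*
 * Split an incoming section into up to three parts: an unaligned head
 * and tail, which are registered as subpages, and a page-aligned
 * middle, which register_multipage() can map whole.  Full pages whose
 * offset within the region is itself unaligned also go through
 * register_subpage(), one page at a time.
 */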
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
    MemoryRegionSection now = limit(*section), remain = limit(*section);

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(d, &now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    while (remain.size >= TARGET_PAGE_SIZE) {
        now = remain;
        if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = TARGET_PAGE_SIZE;
            register_subpage(d, &now);
        } else {
            now.size &= TARGET_PAGE_MASK;
            register_multipage(d, &now);
        }
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(d, &now);
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

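/*
 * Back a RAMBlock with a file on a hugetlbfs mount: create an unlinked
 * temporary file under `path`, grow it to the hugepage-rounded size
 * with ftruncate(), and mmap() it.  Returns NULL on any failure so the
 * caller can fall back to anonymous memory.
 */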
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    block->fd = fd;
    return area;
}
#endif

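/*
 * Pick a ram_addr_t offset for a new block: scan the gaps between the
 * existing blocks and choose the smallest gap that still fits the
 * requested size (best fit, which limits fragmentation of the
 * ram_addr_t space).
 */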
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

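/*
 * Register `size` bytes of guest RAM, backed either by a caller-provided
 * host buffer (host != NULL, flagged RAM_PREALLOC_MASK) or by memory
 * allocated here: a -mem-path file, Xen, a KVM-specific allocator, or a
 * plain anonymous mapping.  Returns the block's offset in ram_addr_t
 * space.  Most callers go through the qemu_ram_alloc() wrapper below,
 * e.g. (illustrative only):
 *
 *     ram_addr_t off = qemu_ram_alloc(0x200000, mr);
 */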
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_anon_ram_alloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_ram_alloc(size);
            } else {
                new_block->host = qemu_anon_ram_alloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_anon_ram_free(block->host, block->length);
                }
#else
                abort();
#endif
            } else {
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_anon_ram_free(block->host, block->length);
                }
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
1243 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001244#endif
1245 } else {
1246#if defined(TARGET_S390X) && defined(CONFIG_KVM)
1247 flags |= MAP_SHARED | MAP_ANONYMOUS;
1248 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
1249 flags, -1, 0);
1250#else
1251 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1252 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1253 flags, -1, 0);
1254#endif
1255 }
1256 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001257 fprintf(stderr, "Could not remap addr: "
1258 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001259 length, addr);
1260 exit(1);
1261 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001262 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001263 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001264 }
1265 return;
1266 }
1267 }
1268}
1269#endif /* !_WIN32 */
1270
pbrookdc828ca2009-04-09 22:21:07 +00001271/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00001272 With the exception of the softmmu code in this file, this should
1273 only be used for local memory (e.g. video ram) that the device owns,
1274 and knows it isn't going to access beyond the end of the block.
1275
1276 It should not be used for general purpose DMA.
1277 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1278 */
Anthony Liguoric227f092009-10-01 16:12:16 -05001279void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00001280{
pbrook94a6b542009-04-11 17:15:54 +00001281 RAMBlock *block;
1282
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001283 /* The list is protected by the iothread lock here. */
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001284 block = ram_list.mru_block;
1285 if (block && addr - block->offset < block->length) {
1286 goto found;
1287 }
Paolo Bonzinia3161032012-11-14 15:54:48 +01001288 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Alex Williamsonf471a172010-06-11 11:11:42 -06001289 if (addr - block->offset < block->length) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001290 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001291 }
pbrook94a6b542009-04-11 17:15:54 +00001292 }
Alex Williamsonf471a172010-06-11 11:11:42 -06001293
1294 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1295 abort();
1296
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001297found:
1298 ram_list.mru_block = block;
1299 if (xen_enabled()) {
1300 /* We need to check if the requested address is in the RAM
1301 * because we don't want to map the entire memory in QEMU.
1302 * In that case just map until the end of the page.
1303 */
1304 if (block->offset == 0) {
1305 return xen_map_cache(addr, 0, 0);
1306 } else if (block->host == NULL) {
1307 block->host =
1308 xen_map_cache(block->offset, block->length, 1);
1309 }
1310 }
1311 return block->host + (addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00001312}
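/* Illustrative sketch: within the constraints documented above, a device
 * that owns a block might initialize its contents like this
 * (my_dev_ram_addr and MY_DEV_RAM_SIZE are hypothetical):
 *
 *     void *host = qemu_get_ram_ptr(my_dev_ram_addr);
 *
 *     memset(host, 0, MY_DEV_RAM_SIZE);
 *
 * The pointer is only meaningful inside the block containing
 * my_dev_ram_addr; it must not be used to reach neighbouring blocks or
 * for general purpose DMA.
 */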
1313
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001314/* Return a host pointer to ram allocated with qemu_ram_alloc. Same as
1315 * qemu_get_ram_ptr, but does not touch ram_list.mru_block.
1316 *
1317 * ??? Is this still necessary?
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001318 */
Blue Swirl8b9c99d2012-10-28 11:04:51 +00001319static void *qemu_safe_ram_ptr(ram_addr_t addr)
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001320{
1321 RAMBlock *block;
1322
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001323 /* The list is protected by the iothread lock here. */
Paolo Bonzinia3161032012-11-14 15:54:48 +01001324 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001325 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02001326 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001327 /* We need to check if the requested address is in the RAM
1328 * because we don't want to map the entire memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001329 * In that case just map until the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01001330 */
1331 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001332 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01001333 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001334 block->host =
1335 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01001336 }
1337 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02001338 return block->host + (addr - block->offset);
1339 }
1340 }
1341
1342 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1343 abort();
1344
1345 return NULL;
1346}
1347
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001348/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1349 * but takes a size argument */
Blue Swirl8b9c99d2012-10-28 11:04:51 +00001350static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001351{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001352 if (*size == 0) {
1353 return NULL;
1354 }
Jan Kiszka868bb332011-06-21 22:59:09 +02001355 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001356 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02001357 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001358 RAMBlock *block;
1359
Paolo Bonzinia3161032012-11-14 15:54:48 +01001360 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001361 if (addr - block->offset < block->length) {
1362 if (addr - block->offset + *size > block->length)
1363 *size = block->length - addr + block->offset;
1364 return block->host + (addr - block->offset);
1365 }
1366 }
1367
1368 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1369 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001370 }
1371}
1372
Marcelo Tosattie8902612010-10-11 15:31:19 -03001373int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00001374{
pbrook94a6b542009-04-11 17:15:54 +00001375 RAMBlock *block;
1376 uint8_t *host = ptr;
1377
Jan Kiszka868bb332011-06-21 22:59:09 +02001378 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001379 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001380 return 0;
1381 }
1382
Paolo Bonzinia3161032012-11-14 15:54:48 +01001383 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001384 /* This case happens when the block is not mapped. */
1385 if (block->host == NULL) {
1386 continue;
1387 }
Alex Williamsonf471a172010-06-11 11:11:42 -06001388 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03001389 *ram_addr = block->offset + (host - block->host);
1390 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06001391 }
pbrook94a6b542009-04-11 17:15:54 +00001392 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001393
Marcelo Tosattie8902612010-10-11 15:31:19 -03001394 return -1;
1395}
Alex Williamsonf471a172010-06-11 11:11:42 -06001396
Marcelo Tosattie8902612010-10-11 15:31:19 -03001397/* Some of the softmmu routines need to translate from a host pointer
1398 (typically a TLB entry) back to a ram offset. */
1399ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
1400{
1401 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06001402
Marcelo Tosattie8902612010-10-11 15:31:19 -03001403 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
1404 fprintf(stderr, "Bad ram pointer %p\n", ptr);
1405 abort();
1406 }
1407 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00001408}
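/* Illustrative sketch: the two variants differ only in how a pointer
 * that is not backed by guest RAM is handled (ptr is assumed to be a
 * host pointer obtained earlier from qemu_get_ram_ptr()):
 *
 *     ram_addr_t ram_addr;
 *
 *     if (qemu_ram_addr_from_host(ptr, &ram_addr) == 0) {
 *         ... ram_addr is valid here ...
 *     }
 *
 * qemu_ram_addr_from_host_nofail() aborts instead of returning -1, so it
 * is only appropriate where the pointer is known to come from a RAM
 * block, e.g. a TLB entry.
 */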
1409
Avi Kivitya8170e52012-10-23 12:30:10 +02001410static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001411 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001412{
bellard3a7d9292005-08-21 09:26:42 +00001413 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001414 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00001415 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001416 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001417 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00001418 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001419 switch (size) {
1420 case 1:
1421 stb_p(qemu_get_ram_ptr(ram_addr), val);
1422 break;
1423 case 2:
1424 stw_p(qemu_get_ram_ptr(ram_addr), val);
1425 break;
1426 case 4:
1427 stl_p(qemu_get_ram_ptr(ram_addr), val);
1428 break;
1429 default:
1430 abort();
1431 }
bellardf23db162005-08-21 19:12:28 +00001432 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001433 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00001434 /* we remove the notdirty callback only if the code has been
1435 flushed */
1436 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00001437 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00001438}
1439
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001440static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1441 unsigned size, bool is_write)
1442{
1443 return is_write;
1444}
1445
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001446static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001447 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001448 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001449 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00001450};
1451
pbrook0f459d12008-06-09 00:20:13 +00001452/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00001453static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00001454{
Andreas Färber9349b4f2012-03-14 01:38:32 +01001455 CPUArchState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00001456 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00001457 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00001458 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00001459 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00001460
aliguori06d55cc2008-11-18 20:24:06 +00001461 if (env->watchpoint_hit) {
1462 /* We re-entered the check after replacing the TB. Now raise
1463 * the debug interrupt so that is will trigger after the
1464 * current instruction. */
Andreas Färberc3affe52013-01-18 15:03:43 +01001465 cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00001466 return;
1467 }
pbrook2e70f6e2008-06-29 01:03:05 +00001468 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00001469 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001470 if ((vaddr == (wp->vaddr & len_mask) ||
1471 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00001472 wp->flags |= BP_WATCHPOINT_HIT;
1473 if (!env->watchpoint_hit) {
1474 env->watchpoint_hit = wp;
Blue Swirl5a316522012-12-02 21:28:09 +00001475 tb_check_watchpoint(env);
aliguori6e140f22008-11-18 20:37:55 +00001476 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1477 env->exception_index = EXCP_DEBUG;
Max Filippov488d6572012-01-29 02:24:39 +04001478 cpu_loop_exit(env);
aliguori6e140f22008-11-18 20:37:55 +00001479 } else {
1480 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1481 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
Max Filippov488d6572012-01-29 02:24:39 +04001482 cpu_resume_from_signal(env, NULL);
aliguori6e140f22008-11-18 20:37:55 +00001483 }
aliguori06d55cc2008-11-18 20:24:06 +00001484 }
aliguori6e140f22008-11-18 20:37:55 +00001485 } else {
1486 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00001487 }
1488 }
1489}
1490
pbrook6658ffb2007-03-16 23:58:11 +00001491/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1492 so these check for a hit then pass through to the normal out-of-line
1493 phys routines. */
Avi Kivitya8170e52012-10-23 12:30:10 +02001494static uint64_t watch_mem_read(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001495 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001496{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001497 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1498 switch (size) {
1499 case 1: return ldub_phys(addr);
1500 case 2: return lduw_phys(addr);
1501 case 4: return ldl_phys(addr);
1502 default: abort();
1503 }
pbrook6658ffb2007-03-16 23:58:11 +00001504}
1505
Avi Kivitya8170e52012-10-23 12:30:10 +02001506static void watch_mem_write(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001507 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001508{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001509 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1510 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04001511 case 1:
1512 stb_phys(addr, val);
1513 break;
1514 case 2:
1515 stw_phys(addr, val);
1516 break;
1517 case 4:
1518 stl_phys(addr, val);
1519 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02001520 default: abort();
1521 }
pbrook6658ffb2007-03-16 23:58:11 +00001522}
1523
Avi Kivity1ec9b902012-01-02 12:47:48 +02001524static const MemoryRegionOps watch_mem_ops = {
1525 .read = watch_mem_read,
1526 .write = watch_mem_write,
1527 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00001528};
pbrook6658ffb2007-03-16 23:58:11 +00001529
Avi Kivitya8170e52012-10-23 12:30:10 +02001530static uint64_t subpage_read(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001531 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001532{
Avi Kivity70c68e42012-01-02 12:32:48 +02001533 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07001534 unsigned int idx = SUBPAGE_IDX(addr);
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001535 uint64_t val;
1536
Avi Kivity5312bd82012-02-12 18:32:55 +02001537 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00001538#if defined(DEBUG_SUBPAGE)
1539 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
1540 mmio, len, addr, idx);
1541#endif
blueswir1db7b5422007-05-26 17:36:03 +00001542
Avi Kivity5312bd82012-02-12 18:32:55 +02001543 section = &phys_sections[mmio->sub_section[idx]];
1544 addr += mmio->base;
1545 addr -= section->offset_within_address_space;
1546 addr += section->offset_within_region;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001547 io_mem_read(section->mr, addr, &val, len);
1548 return val;
blueswir1db7b5422007-05-26 17:36:03 +00001549}
1550
Avi Kivitya8170e52012-10-23 12:30:10 +02001551static void subpage_write(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001552 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001553{
Avi Kivity70c68e42012-01-02 12:32:48 +02001554 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07001555 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02001556 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00001557#if defined(DEBUG_SUBPAGE)
Avi Kivity70c68e42012-01-02 12:32:48 +02001558 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
1559 " idx %d value %"PRIx64"\n",
Richard Hendersonf6405242010-04-22 16:47:31 -07001560 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00001561#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07001562
Avi Kivity5312bd82012-02-12 18:32:55 +02001563 section = &phys_sections[mmio->sub_section[idx]];
1564 addr += mmio->base;
1565 addr -= section->offset_within_address_space;
1566 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02001567 io_mem_write(section->mr, addr, value, len);
blueswir1db7b5422007-05-26 17:36:03 +00001568}
1569
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001570static bool subpage_accepts(void *opaque, hwaddr addr,
1571 unsigned size, bool is_write)
1572{
1573 subpage_t *mmio = opaque;
1574 unsigned int idx = SUBPAGE_IDX(addr);
1575 MemoryRegionSection *section;
1576#if defined(DEBUG_SUBPAGE)
1577 printf("%s: subpage %p %c len %d addr " TARGET_FMT_plx
1578 " idx %d\n", __func__, mmio,
1579 is_write ? 'w' : 'r', size, addr, idx);
1580#endif
1581
1582 section = &phys_sections[mmio->sub_section[idx]];
1583 addr += mmio->base;
1584 addr -= section->offset_within_address_space;
1585 addr += section->offset_within_region;
1586 return memory_region_access_valid(section->mr, addr, size, is_write);
1587}
1588
Avi Kivity70c68e42012-01-02 12:32:48 +02001589static const MemoryRegionOps subpage_ops = {
1590 .read = subpage_read,
1591 .write = subpage_write,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001592 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02001593 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00001594};
1595
Avi Kivitya8170e52012-10-23 12:30:10 +02001596static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
Avi Kivityde712f92012-01-02 12:41:07 +02001597 unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01001598{
1599 ram_addr_t raddr = addr;
1600 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02001601 switch (size) {
1602 case 1: return ldub_p(ptr);
1603 case 2: return lduw_p(ptr);
1604 case 4: return ldl_p(ptr);
1605 default: abort();
1606 }
Andreas Färber56384e82011-11-30 16:26:21 +01001607}
1608
Avi Kivitya8170e52012-10-23 12:30:10 +02001609static void subpage_ram_write(void *opaque, hwaddr addr,
Avi Kivityde712f92012-01-02 12:41:07 +02001610 uint64_t value, unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01001611{
1612 ram_addr_t raddr = addr;
1613 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02001614 switch (size) {
1615 case 1: return stb_p(ptr, value);
1616 case 2: return stw_p(ptr, value);
1617 case 4: return stl_p(ptr, value);
1618 default: abort();
1619 }
Andreas Färber56384e82011-11-30 16:26:21 +01001620}
1621
Avi Kivityde712f92012-01-02 12:41:07 +02001622static const MemoryRegionOps subpage_ram_ops = {
1623 .read = subpage_ram_read,
1624 .write = subpage_ram_write,
1625 .endianness = DEVICE_NATIVE_ENDIAN,
Andreas Färber56384e82011-11-30 16:26:21 +01001626};
1627
Anthony Liguoric227f092009-10-01 16:12:16 -05001628static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02001629 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00001630{
1631 int idx, eidx;
1632
1633 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1634 return -1;
1635 idx = SUBPAGE_IDX(start);
1636 eidx = SUBPAGE_IDX(end);
1637#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00001638 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", __func__,
blueswir1db7b5422007-05-26 17:36:03 +00001639 mmio, start, end, idx, eidx, section);
1640#endif
Avi Kivity5312bd82012-02-12 18:32:55 +02001641 if (memory_region_is_ram(phys_sections[section].mr)) {
1642 MemoryRegionSection new_section = phys_sections[section];
1643 new_section.mr = &io_mem_subpage_ram;
1644 section = phys_section_add(&new_section);
Andreas Färber56384e82011-11-30 16:26:21 +01001645 }
blueswir1db7b5422007-05-26 17:36:03 +00001646 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02001647 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00001648 }
1649
1650 return 0;
1651}
1652
Avi Kivitya8170e52012-10-23 12:30:10 +02001653static subpage_t *subpage_init(hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00001654{
Anthony Liguoric227f092009-10-01 16:12:16 -05001655 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00001656
Anthony Liguori7267c092011-08-20 22:09:37 -05001657 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00001658
1659 mmio->base = base;
Avi Kivity70c68e42012-01-02 12:32:48 +02001660 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
1661 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02001662 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00001663#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00001664 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1665 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00001666#endif
Avi Kivity0f0cb162012-02-13 17:14:32 +02001667 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
blueswir1db7b5422007-05-26 17:36:03 +00001668
1669 return mmio;
1670}
1671
Avi Kivity5312bd82012-02-12 18:32:55 +02001672static uint16_t dummy_section(MemoryRegion *mr)
1673{
1674 MemoryRegionSection section = {
1675 .mr = mr,
1676 .offset_within_address_space = 0,
1677 .offset_within_region = 0,
1678 .size = UINT64_MAX,
1679 };
1680
1681 return phys_section_add(&section);
1682}
1683
Avi Kivitya8170e52012-10-23 12:30:10 +02001684MemoryRegion *iotlb_to_region(hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02001685{
Avi Kivity37ec01d2012-03-08 18:08:35 +02001686 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02001687}
1688
Avi Kivitye9179ce2009-06-14 11:38:52 +03001689static void io_mem_init(void)
1690{
Paolo Bonzinibf8d5162013-05-24 14:39:13 +02001691 memory_region_init_io(&io_mem_rom, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001692 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
1693 "unassigned", UINT64_MAX);
1694 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
1695 "notdirty", UINT64_MAX);
Avi Kivityde712f92012-01-02 12:41:07 +02001696 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
1697 "subpage-ram", UINT64_MAX);
Avi Kivity1ec9b902012-01-02 12:47:48 +02001698 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
1699 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03001700}
1701
Avi Kivityac1970f2012-10-03 16:22:53 +02001702static void mem_begin(MemoryListener *listener)
1703{
1704 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
1705
1706 destroy_all_mappings(d);
1707 d->phys_map.ptr = PHYS_MAP_NODE_NIL;
1708}
1709
Avi Kivity50c1e142012-02-08 21:36:02 +02001710static void core_begin(MemoryListener *listener)
1711{
Avi Kivity5312bd82012-02-12 18:32:55 +02001712 phys_sections_clear();
1713 phys_section_unassigned = dummy_section(&io_mem_unassigned);
Avi Kivityaa102232012-03-08 17:06:55 +02001714 phys_section_notdirty = dummy_section(&io_mem_notdirty);
1715 phys_section_rom = dummy_section(&io_mem_rom);
1716 phys_section_watch = dummy_section(&io_mem_watch);
Avi Kivity50c1e142012-02-08 21:36:02 +02001717}
1718
Avi Kivity1d711482012-10-02 18:54:45 +02001719static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02001720{
Andreas Färber9349b4f2012-03-14 01:38:32 +01001721 CPUArchState *env;
Avi Kivity117712c2012-02-12 21:23:17 +02001722
1723 /* since each CPU stores ram addresses in its TLB cache, we must
1724 reset the modified entries */
1725 /* XXX: slow ! */
1726 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1727 tlb_flush(env, 1);
1728 }
Avi Kivity50c1e142012-02-08 21:36:02 +02001729}
1730
Avi Kivity93632742012-02-08 16:54:16 +02001731static void core_log_global_start(MemoryListener *listener)
1732{
1733 cpu_physical_memory_set_dirty_tracking(1);
1734}
1735
1736static void core_log_global_stop(MemoryListener *listener)
1737{
1738 cpu_physical_memory_set_dirty_tracking(0);
1739}
1740
Avi Kivity4855d412012-02-08 21:16:05 +02001741static void io_region_add(MemoryListener *listener,
1742 MemoryRegionSection *section)
1743{
Avi Kivitya2d33522012-03-05 17:40:12 +02001744 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
1745
1746 mrio->mr = section->mr;
1747 mrio->offset = section->offset_within_region;
1748 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
Avi Kivity4855d412012-02-08 21:16:05 +02001749 section->offset_within_address_space, section->size);
Avi Kivitya2d33522012-03-05 17:40:12 +02001750 ioport_register(&mrio->iorange);
Avi Kivity4855d412012-02-08 21:16:05 +02001751}
1752
1753static void io_region_del(MemoryListener *listener,
1754 MemoryRegionSection *section)
1755{
1756 isa_unassign_ioport(section->offset_within_address_space, section->size);
1757}
1758
Avi Kivity93632742012-02-08 16:54:16 +02001759static MemoryListener core_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02001760 .begin = core_begin,
Avi Kivity93632742012-02-08 16:54:16 +02001761 .log_global_start = core_log_global_start,
1762 .log_global_stop = core_log_global_stop,
Avi Kivityac1970f2012-10-03 16:22:53 +02001763 .priority = 1,
Avi Kivity93632742012-02-08 16:54:16 +02001764};
1765
Avi Kivity4855d412012-02-08 21:16:05 +02001766static MemoryListener io_memory_listener = {
1767 .region_add = io_region_add,
1768 .region_del = io_region_del,
Avi Kivity4855d412012-02-08 21:16:05 +02001769 .priority = 0,
1770};
1771
Avi Kivity1d711482012-10-02 18:54:45 +02001772static MemoryListener tcg_memory_listener = {
1773 .commit = tcg_commit,
1774};
1775
Avi Kivityac1970f2012-10-03 16:22:53 +02001776void address_space_init_dispatch(AddressSpace *as)
1777{
1778 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1779
1780 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1781 d->listener = (MemoryListener) {
1782 .begin = mem_begin,
1783 .region_add = mem_add,
1784 .region_nop = mem_add,
1785 .priority = 0,
1786 };
1787 as->dispatch = d;
1788 memory_listener_register(&d->listener, as);
1789}
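/* Illustrative sketch: this is normally reached through
 * address_space_init() rather than called directly. A device with a
 * private DMA view might do (dev_as and dev_root are hypothetical):
 *
 *     static AddressSpace dev_as;
 *
 *     address_space_init(&dev_as, dev_root);
 *
 * which registers the dispatch listener above and rebuilds the
 * phys_map radix tree whenever dev_root's topology changes.
 */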
1790
Avi Kivity83f3c252012-10-07 12:59:55 +02001791void address_space_destroy_dispatch(AddressSpace *as)
1792{
1793 AddressSpaceDispatch *d = as->dispatch;
1794
1795 memory_listener_unregister(&d->listener);
1796 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
1797 g_free(d);
1798 as->dispatch = NULL;
1799}
1800
Avi Kivity62152b82011-07-26 14:26:14 +03001801static void memory_map_init(void)
1802{
Anthony Liguori7267c092011-08-20 22:09:37 -05001803 system_memory = g_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03001804 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity2673a5d2012-10-02 18:49:28 +02001805 address_space_init(&address_space_memory, system_memory);
1806 address_space_memory.name = "memory";
Avi Kivity309cb472011-08-08 16:09:03 +03001807
Anthony Liguori7267c092011-08-20 22:09:37 -05001808 system_io = g_malloc(sizeof(*system_io));
Avi Kivity309cb472011-08-08 16:09:03 +03001809 memory_region_init(system_io, "io", 65536);
Avi Kivity2673a5d2012-10-02 18:49:28 +02001810 address_space_init(&address_space_io, system_io);
1811 address_space_io.name = "I/O";
Avi Kivity93632742012-02-08 16:54:16 +02001812
Avi Kivityf6790af2012-10-02 20:13:51 +02001813 memory_listener_register(&core_memory_listener, &address_space_memory);
1814 memory_listener_register(&io_memory_listener, &address_space_io);
1815 memory_listener_register(&tcg_memory_listener, &address_space_memory);
Peter Maydell9e119082012-10-29 11:34:32 +10001816
1817 dma_context_init(&dma_context_memory, &address_space_memory,
1818 NULL, NULL, NULL);
Avi Kivity62152b82011-07-26 14:26:14 +03001819}
1820
1821MemoryRegion *get_system_memory(void)
1822{
1823 return system_memory;
1824}
1825
Avi Kivity309cb472011-08-08 16:09:03 +03001826MemoryRegion *get_system_io(void)
1827{
1828 return system_io;
1829}
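/* Illustrative sketch: board code composes the guest memory map by
 * attaching subregions to the region returned by get_system_memory().
 * MY_BOARD_RAM_SIZE is a hypothetical size:
 *
 *     MemoryRegion *sysmem = get_system_memory();
 *     MemoryRegion *ram = g_new(MemoryRegion, 1);
 *
 *     memory_region_init_ram(ram, "my-board.ram", MY_BOARD_RAM_SIZE);
 *     memory_region_add_subregion(sysmem, 0, ram);
 */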
1830
pbrooke2eef172008-06-08 01:09:01 +00001831#endif /* !defined(CONFIG_USER_ONLY) */
1832
bellard13eb76e2004-01-24 15:23:36 +00001833/* physical memory access (slow version, mainly for debug) */
1834#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01001835int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00001836 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00001837{
1838 int l, flags;
1839 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00001840 void * p;
bellard13eb76e2004-01-24 15:23:36 +00001841
1842 while (len > 0) {
1843 page = addr & TARGET_PAGE_MASK;
1844 l = (page + TARGET_PAGE_SIZE) - addr;
1845 if (l > len)
1846 l = len;
1847 flags = page_get_flags(page);
1848 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00001849 return -1;
bellard13eb76e2004-01-24 15:23:36 +00001850 if (is_write) {
1851 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00001852 return -1;
bellard579a97f2007-11-11 14:26:47 +00001853 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001854 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00001855 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001856 memcpy(p, buf, l);
1857 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00001858 } else {
1859 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00001860 return -1;
bellard579a97f2007-11-11 14:26:47 +00001861 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001862 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00001863 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001864 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00001865 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00001866 }
1867 len -= l;
1868 buf += l;
1869 addr += l;
1870 }
Paul Brooka68fe892010-03-01 00:08:59 +00001871 return 0;
bellard13eb76e2004-01-24 15:23:36 +00001872}
bellard8df1cd02005-01-28 22:37:22 +00001873
bellard13eb76e2004-01-24 15:23:36 +00001874#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001875
Avi Kivitya8170e52012-10-23 12:30:10 +02001876static void invalidate_and_set_dirty(hwaddr addr,
1877 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001878{
1879 if (!cpu_physical_memory_is_dirty(addr)) {
1880 /* invalidate code */
1881 tb_invalidate_phys_page_range(addr, addr + length, 0);
1882 /* set dirty bit */
1883 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1884 }
Anthony PERARDe2269392012-10-03 13:49:22 +00001885 xen_modified_memory(addr, length);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001886}
1887
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02001888static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
1889{
1890 if (memory_region_is_ram(mr)) {
1891 return !(is_write && mr->readonly);
1892 }
1893 if (memory_region_is_romd(mr)) {
1894 return !is_write;
1895 }
1896
1897 return false;
1898}
1899
Paolo Bonzini82f25632013-05-24 11:59:43 +02001900static inline int memory_access_size(int l, hwaddr addr)
1901{
1902 if (l >= 4 && ((addr & 3) == 0)) {
1903 return 4;
1904 }
1905 if (l >= 2 && ((addr & 1) == 0)) {
1906 return 2;
1907 }
1908 return 1;
1909}
1910
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001911bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02001912 int len, bool is_write)
bellard13eb76e2004-01-24 15:23:36 +00001913{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001914 hwaddr l;
bellard13eb76e2004-01-24 15:23:36 +00001915 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001916 uint64_t val;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001917 hwaddr addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02001918 MemoryRegionSection *section;
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001919 bool error = false;
ths3b46e622007-09-17 08:09:54 +00001920
bellard13eb76e2004-01-24 15:23:36 +00001921 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001922 l = len;
1923 section = address_space_translate(as, addr, &addr1, &l, is_write);
ths3b46e622007-09-17 08:09:54 +00001924
bellard13eb76e2004-01-24 15:23:36 +00001925 if (is_write) {
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02001926 if (!memory_access_is_direct(section->mr, is_write)) {
Paolo Bonzini82f25632013-05-24 11:59:43 +02001927 l = memory_access_size(l, addr1);
bellard6a00d602005-11-21 23:25:50 +00001928 /* XXX: could force cpu_single_env to NULL to avoid
1929 potential bugs */
Paolo Bonzini82f25632013-05-24 11:59:43 +02001930 if (l == 4) {
bellard1c213d12005-09-03 10:49:04 +00001931 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001932 val = ldl_p(buf);
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001933 error |= io_mem_write(section->mr, addr1, val, 4);
Paolo Bonzini82f25632013-05-24 11:59:43 +02001934 } else if (l == 2) {
bellard1c213d12005-09-03 10:49:04 +00001935 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001936 val = lduw_p(buf);
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001937 error |= io_mem_write(section->mr, addr1, val, 2);
bellard13eb76e2004-01-24 15:23:36 +00001938 } else {
bellard1c213d12005-09-03 10:49:04 +00001939 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001940 val = ldub_p(buf);
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001941 error |= io_mem_write(section->mr, addr1, val, 1);
bellard13eb76e2004-01-24 15:23:36 +00001942 }
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02001943 } else {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001944 addr1 += memory_region_get_ram_addr(section->mr);
bellard13eb76e2004-01-24 15:23:36 +00001945 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00001946 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00001947 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001948 invalidate_and_set_dirty(addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00001949 }
1950 } else {
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02001951 if (!memory_access_is_direct(section->mr, is_write)) {
bellard13eb76e2004-01-24 15:23:36 +00001952 /* I/O case */
Paolo Bonzini82f25632013-05-24 11:59:43 +02001953 l = memory_access_size(l, addr1);
1954 if (l == 4) {
bellard13eb76e2004-01-24 15:23:36 +00001955 /* 32 bit read access */
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001956 error |= io_mem_read(section->mr, addr1, &val, 4);
bellardc27004e2005-01-03 23:35:10 +00001957 stl_p(buf, val);
Paolo Bonzini82f25632013-05-24 11:59:43 +02001958 } else if (l == 2) {
bellard13eb76e2004-01-24 15:23:36 +00001959 /* 16 bit read access */
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001960 error |= io_mem_read(section->mr, addr1, &val, 2);
bellardc27004e2005-01-03 23:35:10 +00001961 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00001962 } else {
bellard1c213d12005-09-03 10:49:04 +00001963 /* 8 bit read access */
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001964 error |= io_mem_read(section->mr, addr1, &val, 1);
bellardc27004e2005-01-03 23:35:10 +00001965 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00001966 }
1967 } else {
1968 /* RAM case */
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001969 ptr = qemu_get_ram_ptr(section->mr->ram_addr + addr1);
Avi Kivityf3705d52012-03-08 16:16:34 +02001970 memcpy(buf, ptr, l);
bellard13eb76e2004-01-24 15:23:36 +00001971 }
1972 }
1973 len -= l;
1974 buf += l;
1975 addr += l;
1976 }
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001977
1978 return error;
bellard13eb76e2004-01-24 15:23:36 +00001979}
bellard8df1cd02005-01-28 22:37:22 +00001980
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001981bool address_space_write(AddressSpace *as, hwaddr addr,
Avi Kivityac1970f2012-10-03 16:22:53 +02001982 const uint8_t *buf, int len)
1983{
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001984 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
Avi Kivityac1970f2012-10-03 16:22:53 +02001985}
1986
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001987bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02001988{
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001989 return address_space_rw(as, addr, buf, len, false);
Avi Kivityac1970f2012-10-03 16:22:53 +02001990}
1991
1992
Avi Kivitya8170e52012-10-23 12:30:10 +02001993void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02001994 int len, int is_write)
1995{
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001996 address_space_rw(&address_space_memory, addr, buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02001997}
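/* Illustrative sketch: device models normally reach guest memory through
 * these wrappers, e.g. to fetch a 16-byte descriptor at a guest-physical
 * address (desc_pa is hypothetical):
 *
 *     uint8_t desc[16];
 *
 *     cpu_physical_memory_read(desc_pa, desc, sizeof(desc));
 *
 * cpu_physical_memory_read/write are thin wrappers that call
 * cpu_physical_memory_rw() with is_write fixed to 0 or 1.
 */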
1998
bellardd0ecd2a2006-04-23 17:14:48 +00001999/* used for ROM loading : can write in RAM and ROM */
Avi Kivitya8170e52012-10-23 12:30:10 +02002000void cpu_physical_memory_write_rom(hwaddr addr,
bellardd0ecd2a2006-04-23 17:14:48 +00002001 const uint8_t *buf, int len)
2002{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002003 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002004 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002005 hwaddr addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02002006 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00002007
bellardd0ecd2a2006-04-23 17:14:48 +00002008 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002009 l = len;
2010 section = address_space_translate(&address_space_memory,
2011 addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002012
Blue Swirlcc5bea62012-04-14 14:56:48 +00002013 if (!(memory_region_is_ram(section->mr) ||
2014 memory_region_is_romd(section->mr))) {
bellardd0ecd2a2006-04-23 17:14:48 +00002015 /* do nothing */
2016 } else {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002017 addr1 += memory_region_get_ram_addr(section->mr);
bellardd0ecd2a2006-04-23 17:14:48 +00002018 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002019 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002020 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002021 invalidate_and_set_dirty(addr1, l);
bellardd0ecd2a2006-04-23 17:14:48 +00002022 }
2023 len -= l;
2024 buf += l;
2025 addr += l;
2026 }
2027}
2028
aliguori6d16c2f2009-01-22 16:59:11 +00002029typedef struct {
2030 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002031 hwaddr addr;
2032 hwaddr len;
aliguori6d16c2f2009-01-22 16:59:11 +00002033} BounceBuffer;
2034
2035static BounceBuffer bounce;
2036
aliguoriba223c22009-01-22 16:59:16 +00002037typedef struct MapClient {
2038 void *opaque;
2039 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00002040 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002041} MapClient;
2042
Blue Swirl72cf2d42009-09-12 07:36:22 +00002043static QLIST_HEAD(map_client_list, MapClient) map_client_list
2044 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002045
2046void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2047{
Anthony Liguori7267c092011-08-20 22:09:37 -05002048 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00002049
2050 client->opaque = opaque;
2051 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002052 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00002053 return client;
2054}
2055
Blue Swirl8b9c99d2012-10-28 11:04:51 +00002056static void cpu_unregister_map_client(void *_client)
aliguoriba223c22009-01-22 16:59:16 +00002057{
2058 MapClient *client = (MapClient *)_client;
2059
Blue Swirl72cf2d42009-09-12 07:36:22 +00002060 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002061 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002062}
2063
2064static void cpu_notify_map_clients(void)
2065{
2066 MapClient *client;
2067
Blue Swirl72cf2d42009-09-12 07:36:22 +00002068 while (!QLIST_EMPTY(&map_client_list)) {
2069 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002070 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09002071 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00002072 }
2073}
2074
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002075bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2076{
2077 MemoryRegionSection *section;
2078 hwaddr l, xlat;
2079
2080 while (len > 0) {
2081 l = len;
2082 section = address_space_translate(as, addr, &xlat, &l, is_write);
2083 if (!memory_access_is_direct(section->mr, is_write)) {
2084 l = memory_access_size(l, addr);
2085 if (!memory_region_access_valid(section->mr, xlat, l, is_write)) {
2086 return false;
2087 }
2088 }
2089
2090 len -= l;
2091 addr += l;
2092 }
2093 return true;
2094}
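/* Illustrative sketch: callers that cannot tolerate a partial or failed
 * transfer can probe the range first (as, addr, len and buf are assumed
 * to be in scope):
 *
 *     if (address_space_access_valid(as, addr, len, false)) {
 *         address_space_read(as, addr, buf, len);
 *     }
 *
 * This only checks that each step accepts an access of the size
 * address_space_rw() would generate; the topology may still change
 * between the check and the access.
 */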
2095
aliguori6d16c2f2009-01-22 16:59:11 +00002096/* Map a physical memory region into a host virtual address.
2097 * May map a subset of the requested range, given by and returned in *plen.
2098 * May return NULL if resources needed to perform the mapping are exhausted.
2099 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002100 * Use cpu_register_map_client() to know when retrying the map operation is
2101 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002102 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002103void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002104 hwaddr addr,
2105 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002106 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002107{
Avi Kivitya8170e52012-10-23 12:30:10 +02002108 hwaddr len = *plen;
2109 hwaddr todo = 0;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002110 hwaddr l, xlat;
Avi Kivityf3705d52012-03-08 16:16:34 +02002111 MemoryRegionSection *section;
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002112 ram_addr_t raddr = RAM_ADDR_MAX;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002113 ram_addr_t rlen;
2114 void *ret;
aliguori6d16c2f2009-01-22 16:59:11 +00002115
2116 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002117 l = len;
2118 section = address_space_translate(as, addr, &xlat, &l, is_write);
aliguori6d16c2f2009-01-22 16:59:11 +00002119
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02002120 if (!memory_access_is_direct(section->mr, is_write)) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002121 if (todo || bounce.buffer) {
aliguori6d16c2f2009-01-22 16:59:11 +00002122 break;
2123 }
2124 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2125 bounce.addr = addr;
2126 bounce.len = l;
2127 if (!is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002128 address_space_read(as, addr, bounce.buffer, l);
aliguori6d16c2f2009-01-22 16:59:11 +00002129 }
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002130
2131 *plen = l;
2132 return bounce.buffer;
aliguori6d16c2f2009-01-22 16:59:11 +00002133 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002134 if (!todo) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002135 raddr = memory_region_get_ram_addr(section->mr) + xlat;
2136 } else {
2137 if (memory_region_get_ram_addr(section->mr) + xlat != raddr + todo) {
2138 break;
2139 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002140 }
aliguori6d16c2f2009-01-22 16:59:11 +00002141
2142 len -= l;
2143 addr += l;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002144 todo += l;
aliguori6d16c2f2009-01-22 16:59:11 +00002145 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002146 rlen = todo;
2147 ret = qemu_ram_ptr_length(raddr, &rlen);
2148 *plen = rlen;
2149 return ret;
aliguori6d16c2f2009-01-22 16:59:11 +00002150}
2151
Avi Kivityac1970f2012-10-03 16:22:53 +02002152/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002153 * Will also mark the memory as dirty if is_write == 1. access_len gives
2154 * the amount of memory that was actually read or written by the caller.
2155 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002156void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2157 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002158{
2159 if (buffer != bounce.buffer) {
2160 if (is_write) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03002161 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002162 while (access_len) {
2163 unsigned l;
2164 l = TARGET_PAGE_SIZE;
2165 if (l > access_len)
2166 l = access_len;
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002167 invalidate_and_set_dirty(addr1, l);
aliguori6d16c2f2009-01-22 16:59:11 +00002168 addr1 += l;
2169 access_len -= l;
2170 }
2171 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002172 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002173 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002174 }
aliguori6d16c2f2009-01-22 16:59:11 +00002175 return;
2176 }
2177 if (is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002178 address_space_write(as, bounce.addr, bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002179 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002180 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002181 bounce.buffer = NULL;
aliguoriba223c22009-01-22 16:59:16 +00002182 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002183}
bellardd0ecd2a2006-04-23 17:14:48 +00002184
Avi Kivitya8170e52012-10-23 12:30:10 +02002185void *cpu_physical_memory_map(hwaddr addr,
2186 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002187 int is_write)
2188{
2189 return address_space_map(&address_space_memory, addr, plen, is_write);
2190}
2191
Avi Kivitya8170e52012-10-23 12:30:10 +02002192void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2193 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002194{
2195 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2196}
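/* Illustrative sketch of the map/use/unmap protocol described above: a
 * zero-copy read of a guest buffer (gpa, want and consume() are
 * hypothetical):
 *
 *     hwaddr plen = want;
 *     void *p = cpu_physical_memory_map(gpa, &plen, 0);
 *
 *     if (p) {
 *         consume(p, plen);
 *         cpu_physical_memory_unmap(p, plen, 0, plen);
 *     }
 *
 * plen may come back smaller than requested, and the call returns NULL
 * while the single bounce buffer is in use, so callers must loop,
 * register a map client, or fall back to cpu_physical_memory_rw().
 */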
2197
bellard8df1cd02005-01-28 22:37:22 +00002198/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002199static inline uint32_t ldl_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002200 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002201{
bellard8df1cd02005-01-28 22:37:22 +00002202 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002203 uint64_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02002204 MemoryRegionSection *section;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002205 hwaddr l = 4;
2206 hwaddr addr1;
bellard8df1cd02005-01-28 22:37:22 +00002207
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002208 section = address_space_translate(&address_space_memory, addr, &addr1, &l,
2209 false);
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02002210 if (l < 4 || !memory_access_is_direct(section->mr, false)) {
bellard8df1cd02005-01-28 22:37:22 +00002211 /* I/O case */
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002212 io_mem_read(section->mr, addr1, &val, 4);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002213#if defined(TARGET_WORDS_BIGENDIAN)
2214 if (endian == DEVICE_LITTLE_ENDIAN) {
2215 val = bswap32(val);
2216 }
2217#else
2218 if (endian == DEVICE_BIG_ENDIAN) {
2219 val = bswap32(val);
2220 }
2221#endif
bellard8df1cd02005-01-28 22:37:22 +00002222 } else {
2223 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02002224 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002225 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002226 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002227 switch (endian) {
2228 case DEVICE_LITTLE_ENDIAN:
2229 val = ldl_le_p(ptr);
2230 break;
2231 case DEVICE_BIG_ENDIAN:
2232 val = ldl_be_p(ptr);
2233 break;
2234 default:
2235 val = ldl_p(ptr);
2236 break;
2237 }
bellard8df1cd02005-01-28 22:37:22 +00002238 }
2239 return val;
2240}
2241
Avi Kivitya8170e52012-10-23 12:30:10 +02002242uint32_t ldl_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002243{
2244 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2245}
2246
Avi Kivitya8170e52012-10-23 12:30:10 +02002247uint32_t ldl_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002248{
2249 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2250}
2251
Avi Kivitya8170e52012-10-23 12:30:10 +02002252uint32_t ldl_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002253{
2254 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2255}
2256
bellard84b7b8e2005-11-28 21:19:04 +00002257/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002258static inline uint64_t ldq_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002259 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00002260{
bellard84b7b8e2005-11-28 21:19:04 +00002261 uint8_t *ptr;
2262 uint64_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02002263 MemoryRegionSection *section;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002264 hwaddr l = 8;
2265 hwaddr addr1;
bellard84b7b8e2005-11-28 21:19:04 +00002266
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002267 section = address_space_translate(&address_space_memory, addr, &addr1, &l,
2268 false);
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02002269 if (l < 8 || !memory_access_is_direct(section->mr, false)) {
bellard84b7b8e2005-11-28 21:19:04 +00002270 /* I/O case */
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002271 io_mem_read(section->mr, addr1, &val, 8);
Paolo Bonzini968a5622013-05-24 17:58:37 +02002272#if defined(TARGET_WORDS_BIGENDIAN)
2273 if (endian == DEVICE_LITTLE_ENDIAN) {
2274 val = bswap64(val);
2275 }
2276#else
2277 if (endian == DEVICE_BIG_ENDIAN) {
2278 val = bswap64(val);
2279 }
2280#endif
bellard84b7b8e2005-11-28 21:19:04 +00002281 } else {
2282 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02002283 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002284 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002285 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002286 switch (endian) {
2287 case DEVICE_LITTLE_ENDIAN:
2288 val = ldq_le_p(ptr);
2289 break;
2290 case DEVICE_BIG_ENDIAN:
2291 val = ldq_be_p(ptr);
2292 break;
2293 default:
2294 val = ldq_p(ptr);
2295 break;
2296 }
bellard84b7b8e2005-11-28 21:19:04 +00002297 }
2298 return val;
2299}
2300
Avi Kivitya8170e52012-10-23 12:30:10 +02002301uint64_t ldq_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002302{
2303 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2304}
2305
Avi Kivitya8170e52012-10-23 12:30:10 +02002306uint64_t ldq_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002307{
2308 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2309}
2310
Avi Kivitya8170e52012-10-23 12:30:10 +02002311uint64_t ldq_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002312{
2313 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2314}
2315
bellardaab33092005-10-30 20:48:42 +00002316/* XXX: optimize */
Avi Kivitya8170e52012-10-23 12:30:10 +02002317uint32_t ldub_phys(hwaddr addr)
bellardaab33092005-10-30 20:48:42 +00002318{
2319 uint8_t val;
2320 cpu_physical_memory_read(addr, &val, 1);
2321 return val;
2322}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;
    hwaddr l = 2;
    hwaddr addr1;

    section = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                      false);
    if (l < 2 || !memory_access_is_direct(section->mr, false)) {
        /* I/O case */
        io_mem_read(section->mr, addr1, &val, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
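
/*
 * Usage sketch (hypothetical, not built): when the translated section is not
 * direct RAM, lduw_phys_internal() dispatches to io_mem_read(), so a 16-bit
 * load from an emulated device register looks the same to the caller as one
 * from guest RAM.  The register offset below is made up for illustration.
 */
#if 0
static uint16_t example_read_status_reg(hwaddr mmio_base)
{
    /* goes through the device's MemoryRegionOps if mmio_base is MMIO,
       or straight to guest RAM otherwise */
    return lduw_le_phys(mmio_base + 0x06); /* hypothetical status offset */
}
#endif /* example */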

/* warning: addr must be aligned. The RAM page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;
    hwaddr l = 4;
    hwaddr addr1;

    section = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                      true);
    if (l < 4 || !memory_access_is_direct(section->mr, true)) {
        io_mem_write(section->mr, addr1, val, 4);
    } else {
        addr1 += memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
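
/*
 * Usage sketch (hypothetical, not built): the intended caller of
 * stl_phys_notdirty() is a target MMU helper that sets accessed/dirty bits
 * in a guest page table entry.  A normal store would mark the RAM page
 * dirty and defeat dirty-bit-based PTE tracking.  The bit mask below is
 * made up for illustration.
 */
#if 0
static void example_set_pte_accessed(hwaddr pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    pte |= 0x20;                      /* hypothetical "accessed" bit */
    stl_phys_notdirty(pte_addr, pte); /* leave the dirty bitmap untouched */
}
#endif /* example */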

/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;
    hwaddr l = 4;
    hwaddr addr1;

    section = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                      true);
    if (l < 4 || !memory_access_is_direct(section->mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(section->mr, addr1, val, 4);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
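
/*
 * Self-check sketch (hypothetical, not built): each fixed-endian store is
 * the inverse of the matching fixed-endian load regardless of host and
 * target byte order, which is the property device models rely on.  The
 * scratch address is assumed to be RAM-backed.
 */
#if 0
static void example_check_le_round_trip(hwaddr scratch)
{
    stl_le_phys(scratch, 0x12345678);
    assert(ldl_le_phys(scratch) == 0x12345678);
}
#endif /* example */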

/* XXX: optimize */
void stb_phys(hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;
    hwaddr l = 2;
    hwaddr addr1;

    section = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                      true);
    if (l < 2 || !memory_access_is_direct(section->mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(section->mr, addr1, val, 2);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
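
/*
 * Note (illustrative, not built): the tswap64() above stores val in the
 * *target's* native byte order, so stq_phys() writes the same bytes as
 * stq_le_phys() or stq_be_phys() depending on TARGET_WORDS_BIGENDIAN.
 */
#if 0
static void example_stq_phys_equivalence(hwaddr scratch, uint64_t val)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    stq_be_phys(scratch, val); /* same bytes as stq_phys(scratch, val) */
#else
    stq_le_phys(scratch, val); /* same bytes as stq_phys(scratch, val) */
#endif
}
#endif /* example */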

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        } else {
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
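
/*
 * Usage sketch (hypothetical, not built): this is the primitive the gdbstub
 * uses to access guest memory at a *virtual* address; it walks the guest
 * page tables one page at a time via cpu_get_phys_page_debug().  The helper
 * name below is made up for illustration.
 */
#if 0
static bool example_peek_guest_u32(CPUArchState *env, target_ulong vaddr,
                                   uint32_t *out)
{
    /* returns false if vaddr is unmapped in the guest MMU */
    return cpu_memory_rw_debug(env, vaddr, (uint8_t *)out,
                               sizeof(*out), 0) == 0;
}
#endif /* example */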
#endif

#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
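
/*
 * Usage sketch (hypothetical, not built): a virtio helper might branch on
 * this to convert a host value into the guest's byte order at run time.
 * The helper name is made up for illustration.
 */
#if 0
static uint16_t example_virtio_tswap16(uint16_t val)
{
    return virtio_is_big_endian() ? cpu_to_be16(val) : cpu_to_le16(val);
}
#endif /* example */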

#endif

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegionSection *section;
    hwaddr l = 1;

    section = address_space_translate(&address_space_memory,
                                      phys_addr, &phys_addr, &l, false);

    return !(memory_region_is_ram(section->mr) ||
             memory_region_is_romd(section->mr));
}
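
/*
 * Usage sketch (hypothetical, not built): memory-dump code can use this
 * predicate to avoid reading device (MMIO) pages, whose read handlers may
 * have side effects.  The page-walk loop below is made up for illustration.
 */
#if 0
static void example_dump_ram_only(hwaddr start, hwaddr end, uint8_t *buf)
{
    hwaddr addr;

    for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_is_io(addr)) {
            memset(buf, 0, TARGET_PAGE_SIZE); /* don't touch MMIO */
        } else {
            cpu_physical_memory_read(addr, buf, TARGET_PAGE_SIZE);
        }
        buf += TARGET_PAGE_SIZE;
    }
}
#endif /* example */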
#endif