/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
     /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

typedef PhysPageEntry Node[L2_SIZE];

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    Node *nodes;
    MemoryRegionSection *sections;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

static PhysPageMap *prev_map;
static PhysPageMap next_map;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

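/* Grow next_map.nodes so that at least 'nodes' more nodes can be handed
 * out without another reallocation; the array at least doubles each time. */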
static void phys_map_node_reserve(unsigned nodes)
{
    if (next_map.nodes_nb + nodes > next_map.nodes_nb_alloc) {
        next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc * 2,
                                      16);
        next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc,
                                      next_map.nodes_nb + nodes);
        next_map.nodes = g_renew(Node, next_map.nodes,
                                 next_map.nodes_nb_alloc);
    }
}

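/* Take the next free node from next_map and initialise all of its
 * entries as non-leaf pointers to PHYS_MAP_NODE_NIL. */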
static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = next_map.nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != next_map.nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        next_map.nodes[ret][i].is_leaf = 0;
        next_map.nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

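/* Fill one level of the phys_map radix tree for the range starting at
 * *index and spanning *nb pages: whenever the remaining range covers a
 * whole, aligned step at this level, store 'leaf' here; otherwise
 * recurse into the level below. */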
static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = next_map.nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = next_map.nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

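/* Map the nb target pages starting at 'index' to the section numbered
 * 'leaf' in dispatch 'd', starting from the top level of the tree. */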
static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

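/* Walk the radix tree from 'lp' down to the leaf for page 'index' and
 * return the matching MemoryRegionSection, or the unassigned section if
 * the page was never mapped. */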
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr index,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    int i;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }
    return &sections[lp.ptr];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

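/* Look up the section covering 'addr'; if it is a subpage container and
 * resolve_subpage is set, descend into the per-subpage section table. */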
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr >> TARGET_PAGE_BITS,
                             d->nodes, d->sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}

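/* Translate 'addr' in 'as' into a MemoryRegion and an offset within it,
 * iterating through any IOMMUs along the way; *plen is clamped to the
 * remaining length of the region and of each IOMMU mapping, and accesses
 * that the IOMMU forbids are redirected to io_mem_unassigned. */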
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    *plen = len;
    *xlat = addr;
    return mr;
}

MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)

{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
    }

    breakpoint_invalidate(ENV_GET_CPU(env), pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(ENV_GET_CPU(env), breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
#endif
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    CPUState *cpu = ENV_GET_CPU(env);
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
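/* Return the RAMBlock containing 'addr', checking the most recently used
 * block first; aborts if the offset is not inside any block. */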
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here. */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    RAMBlock *block;
    ram_addr_t start1;

    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)block->host + (start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

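/* Compute the iotlb value for a TLB entry: a ram_addr_t (tagged with the
 * NOTDIRTY or ROM section) for RAM, or a section index plus offset for
 * I/O, redirected to the watchpoint section when a watchpoint covers the
 * page. */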
hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - address_space_memory.dispatch->sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* !defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t))
{
    phys_mem_alloc = alloc;
}

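/* Append a copy of *section to next_map.sections, taking a reference on
 * its MemoryRegion, and return its index. */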
static uint16_t phys_section_add(MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(next_map.sections_nb < TARGET_PAGE_SIZE);

    if (next_map.sections_nb == next_map.sections_nb_alloc) {
        next_map.sections_nb_alloc = MAX(next_map.sections_nb_alloc * 2,
                                         16);
        next_map.sections = g_renew(MemoryRegionSection, next_map.sections,
                                    next_map.sections_nb_alloc);
    }
    next_map.sections[next_map.sections_nb] = *section;
    memory_region_ref(section->mr);
    return next_map.sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
    g_free(map);
}

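/* Register a section that does not cover a whole target page: create
 * (or reuse) the subpage container for that page and record the section
 * in its per-subpage table. */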
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base >> TARGET_PAGE_BITS,
                                                   next_map.nodes, next_map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}


static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

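/* MemoryListener callback: split an incoming section into an unaligned
 * head, a run of whole pages, and an unaligned tail, registering each
 * piece as a subpage or multipage mapping in the address space's next
 * dispatch. */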
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static sigjmp_buf sigjump;

static void sigbus_handler(int signal)
{
    siglongjmp(sigjump, 1);
}

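/* Back a RAMBlock with an mmap()ed temporary file on a hugetlbfs mount at
 * 'path'.  With -mem-prealloc the pages are touched up front, catching
 * SIGBUS so that allocation failures exit cleanly. */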
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }

    if (mem_prealloc) {
        int ret, i;
        struct sigaction act, oldact;
        sigset_t set, oldset;

        memset(&act, 0, sizeof(act));
        act.sa_handler = &sigbus_handler;
        act.sa_flags = 0;

        ret = sigaction(SIGBUS, &act, &oldact);
        if (ret) {
            perror("file_ram_alloc: failed to install signal handler");
            exit(1);
        }

        /* unblock SIGBUS */
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        pthread_sigmask(SIG_UNBLOCK, &set, &oldset);

        if (sigsetjmp(sigjump, 1)) {
            fprintf(stderr, "file_ram_alloc: failed to preallocate pages\n");
            exit(1);
        }

        /* MAP_POPULATE silently ignores failures */
        for (i = 0; i < (memory/hpagesize)-1; i++) {
            memset(area + (hpagesize*i), 0, 1);
        }

        ret = sigaction(SIGBUS, &oldact, NULL);
        if (ret) {
            perror("file_ram_alloc: failed to reinstall signal handler");
            exit(1);
        }

        pthread_sigmask(SIG_SETMASK, &oldset, NULL);
    }

    block->fd = fd;
    return area;
}
#else
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    fprintf(stderr, "-mem-path not supported on this host\n");
    exit(1);
}
#endif

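/* Choose an offset for a new RAMBlock of 'size' bytes: scan existing
 * blocks for the smallest gap that fits, or abort if none does. */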
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!qemu_opt_get_bool(qemu_get_machine_opts(),
                           "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

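/* Allocate (or adopt, if 'host' is non-NULL) guest RAM of 'size' bytes
 * for MemoryRegion 'mr', insert the new RAMBlock into ram_list sorted by
 * size, and mark the whole range dirty. */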
Avi Kivityc5705a72011-12-20 15:59:12 +02001136ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1137 MemoryRegion *mr)
1138{
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001139 RAMBlock *block, *new_block;
Avi Kivityc5705a72011-12-20 15:59:12 +02001140
1141 size = TARGET_PAGE_ALIGN(size);
1142 new_block = g_malloc0(sizeof(*new_block));
Markus Armbruster3435f392013-07-31 15:11:07 +02001143 new_block->fd = -1;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001144
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001145 /* This assumes the iothread lock is taken here too. */
1146 qemu_mutex_lock_ramlist();
Avi Kivity7c637362011-12-21 13:09:49 +02001147 new_block->mr = mr;
Jun Nakajima432d2682010-08-31 16:41:25 +01001148 new_block->offset = find_ram_offset(size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001149 if (host) {
1150 new_block->host = host;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001151 new_block->flags |= RAM_PREALLOC_MASK;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001152 } else if (xen_enabled()) {
1153 if (mem_path) {
1154 fprintf(stderr, "-mem-path not supported with Xen\n");
1155 exit(1);
1156 }
1157 xen_ram_alloc(new_block->offset, size, mr);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001158 } else {
1159 if (mem_path) {
Markus Armbrustere1e84ba2013-07-31 15:11:10 +02001160 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1161 /*
1162 * file_ram_alloc() needs to allocate just like
1163 * phys_mem_alloc, but we haven't bothered to provide
1164 * a hook there.
1165 */
1166 fprintf(stderr,
1167 "-mem-path not supported with this accelerator\n");
1168 exit(1);
1169 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001170 new_block->host = file_ram_alloc(new_block, size, mem_path);
Markus Armbruster0628c182013-07-31 15:11:06 +02001171 }
1172 if (!new_block->host) {
Markus Armbruster91138032013-07-31 15:11:08 +02001173 new_block->host = phys_mem_alloc(size);
Markus Armbruster39228252013-07-31 15:11:11 +02001174 if (!new_block->host) {
1175 fprintf(stderr, "Cannot set up guest memory '%s': %s\n",
1176 new_block->mr->name, strerror(errno));
1177 exit(1);
1178 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001179 memory_try_enable_merging(new_block->host, size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001180 }
1181 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001182 new_block->length = size;
1183
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001184 /* Keep the list sorted from biggest to smallest block. */
1185 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1186 if (block->length < new_block->length) {
1187 break;
1188 }
1189 }
1190 if (block) {
1191 QTAILQ_INSERT_BEFORE(block, new_block, next);
1192 } else {
1193 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1194 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001195 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001196
Umesh Deshpandef798b072011-08-18 11:41:17 -07001197 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001198 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001199
Anthony Liguori7267c092011-08-20 22:09:37 -05001200 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
Cam Macdonell84b89d72010-07-26 18:10:57 -06001201 last_ram_offset() >> TARGET_PAGE_BITS);
Igor Mitsyanko5fda0432012-08-10 18:45:11 +04001202 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
1203 0, size >> TARGET_PAGE_BITS);
Juan Quintela1720aee2012-06-22 13:14:17 +02001204 cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001205
Jason Baronddb97f12012-08-02 15:44:16 -04001206 qemu_ram_setup_dump(new_block->host, size);
Luiz Capitulinoad0b5322012-10-05 16:47:57 -03001207 qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
Andrea Arcangeli3e469db2013-07-25 12:11:15 +02001208 qemu_madvise(new_block->host, size, QEMU_MADV_DONTFORK);
Jason Baronddb97f12012-08-02 15:44:16 -04001209
Cam Macdonell84b89d72010-07-26 18:10:57 -06001210 if (kvm_enabled())
1211 kvm_setup_guest_memory(new_block->host, size);
1212
1213 return new_block->offset;
1214}
1215
Avi Kivityc5705a72011-12-20 15:59:12 +02001216ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
pbrook94a6b542009-04-11 17:15:54 +00001217{
Avi Kivityc5705a72011-12-20 15:59:12 +02001218 return qemu_ram_alloc_from_ptr(size, NULL, mr);
pbrook94a6b542009-04-11 17:15:54 +00001219}
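/* The value returned by qemu_ram_alloc()/qemu_ram_alloc_from_ptr() is an
 * offset into the global RAM space, not a host pointer.  A minimal caller
 * sketch (illustrative only; "size" and "mr" are assumed to be set up by the
 * caller):
 *
 *     ram_addr_t ram_offset = qemu_ram_alloc(size, mr);
 *     void *host = qemu_get_ram_ptr(ram_offset);
 */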
bellarde9a1ab12007-02-08 23:08:38 +00001220
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001221void qemu_ram_free_from_ptr(ram_addr_t addr)
1222{
1223 RAMBlock *block;
1224
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001225 /* This assumes the iothread lock is taken here too. */
1226 qemu_mutex_lock_ramlist();
Paolo Bonzinia3161032012-11-14 15:54:48 +01001227 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001228 if (addr == block->offset) {
Paolo Bonzinia3161032012-11-14 15:54:48 +01001229 QTAILQ_REMOVE(&ram_list.blocks, block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001230 ram_list.mru_block = NULL;
Umesh Deshpandef798b072011-08-18 11:41:17 -07001231 ram_list.version++;
Anthony Liguori7267c092011-08-20 22:09:37 -05001232 g_free(block);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001233 break;
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001234 }
1235 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001236 qemu_mutex_unlock_ramlist();
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001237}
1238
Anthony Liguoric227f092009-10-01 16:12:16 -05001239void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00001240{
Alex Williamson04b16652010-07-02 11:13:17 -06001241 RAMBlock *block;
1242
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001243 /* This assumes the iothread lock is taken here too. */
1244 qemu_mutex_lock_ramlist();
Paolo Bonzinia3161032012-11-14 15:54:48 +01001245 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001246 if (addr == block->offset) {
Paolo Bonzinia3161032012-11-14 15:54:48 +01001247 QTAILQ_REMOVE(&ram_list.blocks, block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001248 ram_list.mru_block = NULL;
Umesh Deshpandef798b072011-08-18 11:41:17 -07001249 ram_list.version++;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001250 if (block->flags & RAM_PREALLOC_MASK) {
1251 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001252 } else if (xen_enabled()) {
1253 xen_invalidate_map_cache_entry(block->host);
Stefan Weil089f3f72013-09-18 07:48:15 +02001254#ifndef _WIN32
Markus Armbruster3435f392013-07-31 15:11:07 +02001255 } else if (block->fd >= 0) {
1256 munmap(block->host, block->length);
1257 close(block->fd);
Stefan Weil089f3f72013-09-18 07:48:15 +02001258#endif
Alex Williamson04b16652010-07-02 11:13:17 -06001259 } else {
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001260 qemu_anon_ram_free(block->host, block->length);
Alex Williamson04b16652010-07-02 11:13:17 -06001261 }
Anthony Liguori7267c092011-08-20 22:09:37 -05001262 g_free(block);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001263 break;
Alex Williamson04b16652010-07-02 11:13:17 -06001264 }
1265 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001266 qemu_mutex_unlock_ramlist();
Alex Williamson04b16652010-07-02 11:13:17 -06001267
bellarde9a1ab12007-02-08 23:08:38 +00001268}
1269
Huang Yingcd19cfa2011-03-02 08:56:19 +01001270#ifndef _WIN32
1271void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1272{
1273 RAMBlock *block;
1274 ram_addr_t offset;
1275 int flags;
1276 void *area, *vaddr;
1277
Paolo Bonzinia3161032012-11-14 15:54:48 +01001278 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001279 offset = addr - block->offset;
1280 if (offset < block->length) {
1281 vaddr = block->host + offset;
1282 if (block->flags & RAM_PREALLOC_MASK) {
1283 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001284 } else if (xen_enabled()) {
1285 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001286 } else {
1287 flags = MAP_FIXED;
1288 munmap(vaddr, length);
Markus Armbruster3435f392013-07-31 15:11:07 +02001289 if (block->fd >= 0) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001290#ifdef MAP_POPULATE
Markus Armbruster3435f392013-07-31 15:11:07 +02001291 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1292 MAP_PRIVATE;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001293#else
Markus Armbruster3435f392013-07-31 15:11:07 +02001294 flags |= MAP_PRIVATE;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001295#endif
Markus Armbruster3435f392013-07-31 15:11:07 +02001296 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1297 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001298 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001299 /*
1300 * Remap needs to match alloc. Accelerators that
1301 * set phys_mem_alloc never remap. If they did,
1302 * we'd need a remap hook here.
1303 */
1304 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1305
Huang Yingcd19cfa2011-03-02 08:56:19 +01001306 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1307 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1308 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001309 }
1310 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001311 fprintf(stderr, "Could not remap addr: "
1312 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001313 length, addr);
1314 exit(1);
1315 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001316 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001317 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001318 }
1319 return;
1320 }
1321 }
1322}
1323#endif /* !_WIN32 */
1324
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001325/* Return a host pointer to ram allocated with qemu_ram_alloc.
1326 With the exception of the softmmu code in this file, this should
1327 only be used for local memory (e.g. video ram) that the device owns,
1328 and knows it isn't going to access beyond the end of the block.
1329
1330 It should not be used for general purpose DMA.
1331 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1332 */
1333void *qemu_get_ram_ptr(ram_addr_t addr)
1334{
1335 RAMBlock *block = qemu_get_ram_block(addr);
1336
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001337 if (xen_enabled()) {
1338        /* We need to check if the requested address is in RAM
1339         * because we don't want to map the entire guest memory in QEMU.
1340         * In that case, just map until the end of the page.
1341 */
1342 if (block->offset == 0) {
1343 return xen_map_cache(addr, 0, 0);
1344 } else if (block->host == NULL) {
1345 block->host =
1346 xen_map_cache(block->offset, block->length, 1);
1347 }
1348 }
1349 return block->host + (addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00001350}
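/* Per the comment above, a typical legitimate caller is a device touching a
 * RAM block it owns (e.g. video RAM), for instance (illustrative;
 * "vram_offset" is assumed to come from qemu_ram_alloc()):
 *
 *     uint8_t *vram = qemu_get_ram_ptr(vram_offset);
 *
 * Guest-driven DMA should instead go through cpu_physical_memory_rw() or
 * cpu_physical_memory_map() further down in this file. */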
1351
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001352/* Return a host pointer to guest RAM. Similar to qemu_get_ram_ptr,
1353 * but takes a size argument. */
Peter Maydellcb85f7a2013-07-08 09:44:04 +01001354static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001355{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001356 if (*size == 0) {
1357 return NULL;
1358 }
Jan Kiszka868bb332011-06-21 22:59:09 +02001359 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001360 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02001361 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001362 RAMBlock *block;
1363
Paolo Bonzinia3161032012-11-14 15:54:48 +01001364 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001365 if (addr - block->offset < block->length) {
1366 if (addr - block->offset + *size > block->length)
1367 *size = block->length - addr + block->offset;
1368 return block->host + (addr - block->offset);
1369 }
1370 }
1371
1372 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1373 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001374 }
1375}
1376
Paolo Bonzini7443b432013-06-03 12:44:02 +02001377/* Some of the softmmu routines need to translate from a host pointer
1378 (typically a TLB entry) back to a ram offset. */
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001379MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00001380{
pbrook94a6b542009-04-11 17:15:54 +00001381 RAMBlock *block;
1382 uint8_t *host = ptr;
1383
Jan Kiszka868bb332011-06-21 22:59:09 +02001384 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001385 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001386 return qemu_get_ram_block(*ram_addr)->mr;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001387 }
1388
Paolo Bonzini23887b72013-05-06 14:28:39 +02001389 block = ram_list.mru_block;
1390 if (block && block->host && host - block->host < block->length) {
1391 goto found;
1392 }
1393
Paolo Bonzinia3161032012-11-14 15:54:48 +01001394 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001395        /* This case appears when the block is not mapped. */
1396 if (block->host == NULL) {
1397 continue;
1398 }
Alex Williamsonf471a172010-06-11 11:11:42 -06001399 if (host - block->host < block->length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001400 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001401 }
pbrook94a6b542009-04-11 17:15:54 +00001402 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001403
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001404 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001405
1406found:
1407 *ram_addr = block->offset + (host - block->host);
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001408 return block->mr;
Marcelo Tosattie8902612010-10-11 15:31:19 -03001409}
Alex Williamsonf471a172010-06-11 11:11:42 -06001410
Avi Kivitya8170e52012-10-23 12:30:10 +02001411static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001412 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001413{
bellard3a7d9292005-08-21 09:26:42 +00001414 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001415 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00001416 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001417 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001418 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00001419 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001420 switch (size) {
1421 case 1:
1422 stb_p(qemu_get_ram_ptr(ram_addr), val);
1423 break;
1424 case 2:
1425 stw_p(qemu_get_ram_ptr(ram_addr), val);
1426 break;
1427 case 4:
1428 stl_p(qemu_get_ram_ptr(ram_addr), val);
1429 break;
1430 default:
1431 abort();
1432 }
bellardf23db162005-08-21 19:12:28 +00001433 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001434 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00001435 /* we remove the notdirty callback only if the code has been
1436 flushed */
Andreas Färber4917cf42013-05-27 05:17:50 +02001437 if (dirty_flags == 0xff) {
1438 CPUArchState *env = current_cpu->env_ptr;
1439 tlb_set_dirty(env, env->mem_io_vaddr);
1440 }
bellard1ccde1c2004-02-06 19:46:14 +00001441}
1442
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001443static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1444 unsigned size, bool is_write)
1445{
1446 return is_write;
1447}
1448
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001449static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001450 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001451 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001452 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00001453};
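/* Writes to pages whose dirty bits are not all set are routed here through
 * the TLB: the handler above invalidates any translated code derived from the
 * page, performs the store, and then updates the dirty flags; once the page
 * is fully dirty the TLB entry is reset so later writes take the fast path. */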
1454
pbrook0f459d12008-06-09 00:20:13 +00001455/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00001456static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00001457{
Andreas Färber4917cf42013-05-27 05:17:50 +02001458 CPUArchState *env = current_cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00001459 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00001460 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00001461 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00001462 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00001463
aliguori06d55cc2008-11-18 20:24:06 +00001464 if (env->watchpoint_hit) {
1465 /* We re-entered the check after replacing the TB. Now raise
1466             * the debug interrupt so that it will trigger after the
1467 * current instruction. */
Andreas Färberc3affe52013-01-18 15:03:43 +01001468 cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00001469 return;
1470 }
pbrook2e70f6e2008-06-29 01:03:05 +00001471 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00001472 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001473 if ((vaddr == (wp->vaddr & len_mask) ||
1474 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00001475 wp->flags |= BP_WATCHPOINT_HIT;
1476 if (!env->watchpoint_hit) {
1477 env->watchpoint_hit = wp;
Blue Swirl5a316522012-12-02 21:28:09 +00001478 tb_check_watchpoint(env);
aliguori6e140f22008-11-18 20:37:55 +00001479 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1480 env->exception_index = EXCP_DEBUG;
Max Filippov488d6572012-01-29 02:24:39 +04001481 cpu_loop_exit(env);
aliguori6e140f22008-11-18 20:37:55 +00001482 } else {
1483 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1484 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
Max Filippov488d6572012-01-29 02:24:39 +04001485 cpu_resume_from_signal(env, NULL);
aliguori6e140f22008-11-18 20:37:55 +00001486 }
aliguori06d55cc2008-11-18 20:24:06 +00001487 }
aliguori6e140f22008-11-18 20:37:55 +00001488 } else {
1489 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00001490 }
1491 }
1492}
1493
pbrook6658ffb2007-03-16 23:58:11 +00001494/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1495 so these check for a hit then pass through to the normal out-of-line
1496 phys routines. */
Avi Kivitya8170e52012-10-23 12:30:10 +02001497static uint64_t watch_mem_read(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001498 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001499{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001500 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1501 switch (size) {
1502 case 1: return ldub_phys(addr);
1503 case 2: return lduw_phys(addr);
1504 case 4: return ldl_phys(addr);
1505 default: abort();
1506 }
pbrook6658ffb2007-03-16 23:58:11 +00001507}
1508
Avi Kivitya8170e52012-10-23 12:30:10 +02001509static void watch_mem_write(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001510 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001511{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001512 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1513 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04001514 case 1:
1515 stb_phys(addr, val);
1516 break;
1517 case 2:
1518 stw_phys(addr, val);
1519 break;
1520 case 4:
1521 stl_phys(addr, val);
1522 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02001523 default: abort();
1524 }
pbrook6658ffb2007-03-16 23:58:11 +00001525}
1526
Avi Kivity1ec9b902012-01-02 12:47:48 +02001527static const MemoryRegionOps watch_mem_ops = {
1528 .read = watch_mem_read,
1529 .write = watch_mem_write,
1530 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00001531};
pbrook6658ffb2007-03-16 23:58:11 +00001532
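/* Subpage handling: when a target page is shared by several memory region
 * sections, a subpage_t stands in for the whole page and the accessors below
 * forward each access to the owning address space at subpage->base + addr,
 * so the normal dispatch then resolves the correct section within the page. */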
Avi Kivitya8170e52012-10-23 12:30:10 +02001533static uint64_t subpage_read(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001534 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001535{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001536 subpage_t *subpage = opaque;
1537 uint8_t buf[4];
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001538
blueswir1db7b5422007-05-26 17:36:03 +00001539#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001540 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001541 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00001542#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001543 address_space_read(subpage->as, addr + subpage->base, buf, len);
1544 switch (len) {
1545 case 1:
1546 return ldub_p(buf);
1547 case 2:
1548 return lduw_p(buf);
1549 case 4:
1550 return ldl_p(buf);
1551 default:
1552 abort();
1553 }
blueswir1db7b5422007-05-26 17:36:03 +00001554}
1555
Avi Kivitya8170e52012-10-23 12:30:10 +02001556static void subpage_write(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001557 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001558{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001559 subpage_t *subpage = opaque;
1560 uint8_t buf[4];
1561
blueswir1db7b5422007-05-26 17:36:03 +00001562#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001563 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001564 " value %"PRIx64"\n",
1565 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00001566#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001567 switch (len) {
1568 case 1:
1569 stb_p(buf, value);
1570 break;
1571 case 2:
1572 stw_p(buf, value);
1573 break;
1574 case 4:
1575 stl_p(buf, value);
1576 break;
1577 default:
1578 abort();
1579 }
1580 address_space_write(subpage->as, addr + subpage->base, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00001581}
1582
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001583static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08001584 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001585{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001586 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001587#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001588 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001589 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001590#endif
1591
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001592 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08001593 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001594}
1595
Avi Kivity70c68e42012-01-02 12:32:48 +02001596static const MemoryRegionOps subpage_ops = {
1597 .read = subpage_read,
1598 .write = subpage_write,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001599 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02001600 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00001601};
1602
Anthony Liguoric227f092009-10-01 16:12:16 -05001603static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02001604 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00001605{
1606 int idx, eidx;
1607
1608 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1609 return -1;
1610 idx = SUBPAGE_IDX(start);
1611 eidx = SUBPAGE_IDX(end);
1612#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001613 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
1614 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00001615#endif
blueswir1db7b5422007-05-26 17:36:03 +00001616 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02001617 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00001618 }
1619
1620 return 0;
1621}
1622
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001623static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00001624{
Anthony Liguoric227f092009-10-01 16:12:16 -05001625 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00001626
Anthony Liguori7267c092011-08-20 22:09:37 -05001627 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00001628
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001629 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00001630 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001631 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Avi Kivity70c68e42012-01-02 12:32:48 +02001632 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02001633 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00001634#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001635 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1636 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00001637#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02001638 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00001639
1640 return mmio;
1641}
1642
Avi Kivity5312bd82012-02-12 18:32:55 +02001643static uint16_t dummy_section(MemoryRegion *mr)
1644{
1645 MemoryRegionSection section = {
1646 .mr = mr,
1647 .offset_within_address_space = 0,
1648 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001649 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02001650 };
1651
1652 return phys_section_add(&section);
1653}
1654
Avi Kivitya8170e52012-10-23 12:30:10 +02001655MemoryRegion *iotlb_to_region(hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02001656{
Paolo Bonzini0475d942013-05-29 12:28:21 +02001657 return address_space_memory.dispatch->sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02001658}
1659
Avi Kivitye9179ce2009-06-14 11:38:52 +03001660static void io_mem_init(void)
1661{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001662 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
1663 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001664 "unassigned", UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001665 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001666 "notdirty", UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001667 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001668 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03001669}
1670
Avi Kivityac1970f2012-10-03 16:22:53 +02001671static void mem_begin(MemoryListener *listener)
1672{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001673 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini00752702013-05-29 12:13:54 +02001674 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1675
1676 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1677 d->as = as;
1678 as->next_dispatch = d;
1679}
1680
1681static void mem_commit(MemoryListener *listener)
1682{
1683 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02001684 AddressSpaceDispatch *cur = as->dispatch;
1685 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02001686
Paolo Bonzini0475d942013-05-29 12:28:21 +02001687 next->nodes = next_map.nodes;
1688 next->sections = next_map.sections;
1689
1690 as->dispatch = next;
1691 g_free(cur);
Avi Kivityac1970f2012-10-03 16:22:53 +02001692}
1693
Avi Kivity50c1e142012-02-08 21:36:02 +02001694static void core_begin(MemoryListener *listener)
1695{
Liu Ping Fanb41aac42013-05-29 11:09:17 +02001696 uint16_t n;
1697
Paolo Bonzini60926662013-05-29 12:30:26 +02001698 prev_map = g_new(PhysPageMap, 1);
1699 *prev_map = next_map;
1700
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001701 memset(&next_map, 0, sizeof(next_map));
Liu Ping Fanb41aac42013-05-29 11:09:17 +02001702 n = dummy_section(&io_mem_unassigned);
1703 assert(n == PHYS_SECTION_UNASSIGNED);
1704 n = dummy_section(&io_mem_notdirty);
1705 assert(n == PHYS_SECTION_NOTDIRTY);
1706 n = dummy_section(&io_mem_rom);
1707 assert(n == PHYS_SECTION_ROM);
1708 n = dummy_section(&io_mem_watch);
1709 assert(n == PHYS_SECTION_WATCH);
Avi Kivity50c1e142012-02-08 21:36:02 +02001710}
1711
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001712/* This listener's commit runs after the other AddressSpaceDispatch listeners'.
1713 * All AddressSpaceDispatch instances have switched to the next map.
1714 */
1715static void core_commit(MemoryListener *listener)
1716{
Paolo Bonzini60926662013-05-29 12:30:26 +02001717 phys_sections_free(prev_map);
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001718}
1719
Avi Kivity1d711482012-10-02 18:54:45 +02001720static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02001721{
Andreas Färber182735e2013-05-29 22:29:20 +02001722 CPUState *cpu;
Avi Kivity117712c2012-02-12 21:23:17 +02001723
1724 /* since each CPU stores ram addresses in its TLB cache, we must
1725 reset the modified entries */
1726 /* XXX: slow ! */
Andreas Färberbdc44642013-06-24 23:50:24 +02001727 CPU_FOREACH(cpu) {
Andreas Färber182735e2013-05-29 22:29:20 +02001728 CPUArchState *env = cpu->env_ptr;
1729
Avi Kivity117712c2012-02-12 21:23:17 +02001730 tlb_flush(env, 1);
1731 }
Avi Kivity50c1e142012-02-08 21:36:02 +02001732}
1733
Avi Kivity93632742012-02-08 16:54:16 +02001734static void core_log_global_start(MemoryListener *listener)
1735{
1736 cpu_physical_memory_set_dirty_tracking(1);
1737}
1738
1739static void core_log_global_stop(MemoryListener *listener)
1740{
1741 cpu_physical_memory_set_dirty_tracking(0);
1742}
1743
Avi Kivity93632742012-02-08 16:54:16 +02001744static MemoryListener core_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02001745 .begin = core_begin,
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001746 .commit = core_commit,
Avi Kivity93632742012-02-08 16:54:16 +02001747 .log_global_start = core_log_global_start,
1748 .log_global_stop = core_log_global_stop,
Avi Kivityac1970f2012-10-03 16:22:53 +02001749 .priority = 1,
Avi Kivity93632742012-02-08 16:54:16 +02001750};
1751
Avi Kivity1d711482012-10-02 18:54:45 +02001752static MemoryListener tcg_memory_listener = {
1753 .commit = tcg_commit,
1754};
1755
Avi Kivityac1970f2012-10-03 16:22:53 +02001756void address_space_init_dispatch(AddressSpace *as)
1757{
Paolo Bonzini00752702013-05-29 12:13:54 +02001758 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001759 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02001760 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02001761 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02001762 .region_add = mem_add,
1763 .region_nop = mem_add,
1764 .priority = 0,
1765 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001766 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02001767}
1768
Avi Kivity83f3c252012-10-07 12:59:55 +02001769void address_space_destroy_dispatch(AddressSpace *as)
1770{
1771 AddressSpaceDispatch *d = as->dispatch;
1772
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001773 memory_listener_unregister(&as->dispatch_listener);
Avi Kivity83f3c252012-10-07 12:59:55 +02001774 g_free(d);
1775 as->dispatch = NULL;
1776}
1777
Avi Kivity62152b82011-07-26 14:26:14 +03001778static void memory_map_init(void)
1779{
Anthony Liguori7267c092011-08-20 22:09:37 -05001780 system_memory = g_malloc(sizeof(*system_memory));
Michael S. Tsirkinef9e4552013-11-10 11:54:33 +02001781 memory_region_init(system_memory, NULL, "system", INT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00001782 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03001783
Anthony Liguori7267c092011-08-20 22:09:37 -05001784 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02001785 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
1786 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00001787 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity93632742012-02-08 16:54:16 +02001788
Avi Kivityf6790af2012-10-02 20:13:51 +02001789 memory_listener_register(&core_memory_listener, &address_space_memory);
liguang26416892013-09-04 14:37:33 +08001790 if (tcg_enabled()) {
1791 memory_listener_register(&tcg_memory_listener, &address_space_memory);
1792 }
Avi Kivity62152b82011-07-26 14:26:14 +03001793}
1794
1795MemoryRegion *get_system_memory(void)
1796{
1797 return system_memory;
1798}
1799
Avi Kivity309cb472011-08-08 16:09:03 +03001800MemoryRegion *get_system_io(void)
1801{
1802 return system_io;
1803}
1804
pbrooke2eef172008-06-08 01:09:01 +00001805#endif /* !defined(CONFIG_USER_ONLY) */
1806
bellard13eb76e2004-01-24 15:23:36 +00001807/* physical memory access (slow version, mainly for debug) */
1808#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02001809int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00001810 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00001811{
1812 int l, flags;
1813 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00001814 void * p;
bellard13eb76e2004-01-24 15:23:36 +00001815
1816 while (len > 0) {
1817 page = addr & TARGET_PAGE_MASK;
1818 l = (page + TARGET_PAGE_SIZE) - addr;
1819 if (l > len)
1820 l = len;
1821 flags = page_get_flags(page);
1822 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00001823 return -1;
bellard13eb76e2004-01-24 15:23:36 +00001824 if (is_write) {
1825 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00001826 return -1;
bellard579a97f2007-11-11 14:26:47 +00001827 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001828 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00001829 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001830 memcpy(p, buf, l);
1831 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00001832 } else {
1833 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00001834 return -1;
bellard579a97f2007-11-11 14:26:47 +00001835 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001836 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00001837 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001838 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00001839 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00001840 }
1841 len -= l;
1842 buf += l;
1843 addr += l;
1844 }
Paul Brooka68fe892010-03-01 00:08:59 +00001845 return 0;
bellard13eb76e2004-01-24 15:23:36 +00001846}
bellard8df1cd02005-01-28 22:37:22 +00001847
bellard13eb76e2004-01-24 15:23:36 +00001848#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001849
Avi Kivitya8170e52012-10-23 12:30:10 +02001850static void invalidate_and_set_dirty(hwaddr addr,
1851 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001852{
1853 if (!cpu_physical_memory_is_dirty(addr)) {
1854 /* invalidate code */
1855 tb_invalidate_phys_page_range(addr, addr + length, 0);
1856 /* set dirty bit */
1857 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1858 }
Anthony PERARDe2269392012-10-03 13:49:22 +00001859 xen_modified_memory(addr, length);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001860}
1861
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02001862static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
1863{
1864 if (memory_region_is_ram(mr)) {
1865 return !(is_write && mr->readonly);
1866 }
1867 if (memory_region_is_romd(mr)) {
1868 return !is_write;
1869 }
1870
1871 return false;
1872}
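/* "Direct" here means the access can be completed with a plain host memcpy:
 * RAM is direct except for writes to a read-only region, and ROM devices
 * (romd) are direct for reads only; everything else goes through io_mem_*. */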
1873
Richard Henderson23326162013-07-08 14:55:59 -07001874static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02001875{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02001876 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07001877
1878 /* Regions are assumed to support 1-4 byte accesses unless
1879 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07001880 if (access_size_max == 0) {
1881 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02001882 }
Richard Henderson23326162013-07-08 14:55:59 -07001883
1884 /* Bound the maximum access by the alignment of the address. */
1885 if (!mr->ops->impl.unaligned) {
1886 unsigned align_size_max = addr & -addr;
1887 if (align_size_max != 0 && align_size_max < access_size_max) {
1888 access_size_max = align_size_max;
1889 }
1890 }
1891
1892 /* Don't attempt accesses larger than the maximum. */
1893 if (l > access_size_max) {
1894 l = access_size_max;
1895 }
Paolo Bonzini098178f2013-07-29 14:27:39 +02001896 if (l & (l - 1)) {
1897 l = 1 << (qemu_fls(l) - 1);
1898 }
Richard Henderson23326162013-07-08 14:55:59 -07001899
1900 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02001901}
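/* Worked example (illustrative): for a region that leaves
 * valid.max_access_size at 0 and does not allow unaligned accesses, an 8-byte
 * access at address 0x1006 is first capped at 4 (the default) and then at 2
 * by the address alignment (0x1006 & -0x1006 == 2), so the caller's loop
 * issues a 2-byte access and comes back for the remainder. */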
1902
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001903bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02001904 int len, bool is_write)
bellard13eb76e2004-01-24 15:23:36 +00001905{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001906 hwaddr l;
bellard13eb76e2004-01-24 15:23:36 +00001907 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001908 uint64_t val;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001909 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001910 MemoryRegion *mr;
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001911 bool error = false;
ths3b46e622007-09-17 08:09:54 +00001912
bellard13eb76e2004-01-24 15:23:36 +00001913 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001914 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001915 mr = address_space_translate(as, addr, &addr1, &l, is_write);
ths3b46e622007-09-17 08:09:54 +00001916
bellard13eb76e2004-01-24 15:23:36 +00001917 if (is_write) {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001918 if (!memory_access_is_direct(mr, is_write)) {
1919 l = memory_access_size(mr, l, addr1);
Andreas Färber4917cf42013-05-27 05:17:50 +02001920 /* XXX: could force current_cpu to NULL to avoid
bellard6a00d602005-11-21 23:25:50 +00001921 potential bugs */
Richard Henderson23326162013-07-08 14:55:59 -07001922 switch (l) {
1923 case 8:
1924 /* 64 bit write access */
1925 val = ldq_p(buf);
1926 error |= io_mem_write(mr, addr1, val, 8);
1927 break;
1928 case 4:
bellard1c213d12005-09-03 10:49:04 +00001929 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001930 val = ldl_p(buf);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001931 error |= io_mem_write(mr, addr1, val, 4);
Richard Henderson23326162013-07-08 14:55:59 -07001932 break;
1933 case 2:
bellard1c213d12005-09-03 10:49:04 +00001934 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001935 val = lduw_p(buf);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001936 error |= io_mem_write(mr, addr1, val, 2);
Richard Henderson23326162013-07-08 14:55:59 -07001937 break;
1938 case 1:
bellard1c213d12005-09-03 10:49:04 +00001939 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001940 val = ldub_p(buf);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001941 error |= io_mem_write(mr, addr1, val, 1);
Richard Henderson23326162013-07-08 14:55:59 -07001942 break;
1943 default:
1944 abort();
bellard13eb76e2004-01-24 15:23:36 +00001945 }
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02001946 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001947 addr1 += memory_region_get_ram_addr(mr);
bellard13eb76e2004-01-24 15:23:36 +00001948 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00001949 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00001950 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001951 invalidate_and_set_dirty(addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00001952 }
1953 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001954 if (!memory_access_is_direct(mr, is_write)) {
bellard13eb76e2004-01-24 15:23:36 +00001955 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001956 l = memory_access_size(mr, l, addr1);
Richard Henderson23326162013-07-08 14:55:59 -07001957 switch (l) {
1958 case 8:
1959 /* 64 bit read access */
1960 error |= io_mem_read(mr, addr1, &val, 8);
1961 stq_p(buf, val);
1962 break;
1963 case 4:
bellard13eb76e2004-01-24 15:23:36 +00001964 /* 32 bit read access */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001965 error |= io_mem_read(mr, addr1, &val, 4);
bellardc27004e2005-01-03 23:35:10 +00001966 stl_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07001967 break;
1968 case 2:
bellard13eb76e2004-01-24 15:23:36 +00001969 /* 16 bit read access */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001970 error |= io_mem_read(mr, addr1, &val, 2);
bellardc27004e2005-01-03 23:35:10 +00001971 stw_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07001972 break;
1973 case 1:
bellard1c213d12005-09-03 10:49:04 +00001974 /* 8 bit read access */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001975 error |= io_mem_read(mr, addr1, &val, 1);
bellardc27004e2005-01-03 23:35:10 +00001976 stb_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07001977 break;
1978 default:
1979 abort();
bellard13eb76e2004-01-24 15:23:36 +00001980 }
1981 } else {
1982 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001983 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
Avi Kivityf3705d52012-03-08 16:16:34 +02001984 memcpy(buf, ptr, l);
bellard13eb76e2004-01-24 15:23:36 +00001985 }
1986 }
1987 len -= l;
1988 buf += l;
1989 addr += l;
1990 }
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001991
1992 return error;
bellard13eb76e2004-01-24 15:23:36 +00001993}
bellard8df1cd02005-01-28 22:37:22 +00001994
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001995bool address_space_write(AddressSpace *as, hwaddr addr,
Avi Kivityac1970f2012-10-03 16:22:53 +02001996 const uint8_t *buf, int len)
1997{
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001998 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
Avi Kivityac1970f2012-10-03 16:22:53 +02001999}
2000
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002001bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002002{
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002003 return address_space_rw(as, addr, buf, len, false);
Avi Kivityac1970f2012-10-03 16:22:53 +02002004}
2005
2006
Avi Kivitya8170e52012-10-23 12:30:10 +02002007void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002008 int len, int is_write)
2009{
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002010 address_space_rw(&address_space_memory, addr, buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002011}
2012
bellardd0ecd2a2006-04-23 17:14:48 +00002013/* used for ROM loading : can write in RAM and ROM */
Avi Kivitya8170e52012-10-23 12:30:10 +02002014void cpu_physical_memory_write_rom(hwaddr addr,
bellardd0ecd2a2006-04-23 17:14:48 +00002015 const uint8_t *buf, int len)
2016{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002017 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002018 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002019 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002020 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002021
bellardd0ecd2a2006-04-23 17:14:48 +00002022 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002023 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002024 mr = address_space_translate(&address_space_memory,
2025 addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002026
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002027 if (!(memory_region_is_ram(mr) ||
2028 memory_region_is_romd(mr))) {
bellardd0ecd2a2006-04-23 17:14:48 +00002029 /* do nothing */
2030 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002031 addr1 += memory_region_get_ram_addr(mr);
bellardd0ecd2a2006-04-23 17:14:48 +00002032 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002033 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002034 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002035 invalidate_and_set_dirty(addr1, l);
bellardd0ecd2a2006-04-23 17:14:48 +00002036 }
2037 len -= l;
2038 buf += l;
2039 addr += l;
2040 }
2041}
2042
aliguori6d16c2f2009-01-22 16:59:11 +00002043typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002044 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002045 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002046 hwaddr addr;
2047 hwaddr len;
aliguori6d16c2f2009-01-22 16:59:11 +00002048} BounceBuffer;
2049
2050static BounceBuffer bounce;
2051
aliguoriba223c22009-01-22 16:59:16 +00002052typedef struct MapClient {
2053 void *opaque;
2054 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00002055 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002056} MapClient;
2057
Blue Swirl72cf2d42009-09-12 07:36:22 +00002058static QLIST_HEAD(map_client_list, MapClient) map_client_list
2059 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002060
2061void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2062{
Anthony Liguori7267c092011-08-20 22:09:37 -05002063 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00002064
2065 client->opaque = opaque;
2066 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002067 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00002068 return client;
2069}
2070
Blue Swirl8b9c99d2012-10-28 11:04:51 +00002071static void cpu_unregister_map_client(void *_client)
aliguoriba223c22009-01-22 16:59:16 +00002072{
2073 MapClient *client = (MapClient *)_client;
2074
Blue Swirl72cf2d42009-09-12 07:36:22 +00002075 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002076 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002077}
2078
2079static void cpu_notify_map_clients(void)
2080{
2081 MapClient *client;
2082
Blue Swirl72cf2d42009-09-12 07:36:22 +00002083 while (!QLIST_EMPTY(&map_client_list)) {
2084 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002085 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09002086 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00002087 }
2088}
2089
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002090bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2091{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002092 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002093 hwaddr l, xlat;
2094
2095 while (len > 0) {
2096 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002097 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2098 if (!memory_access_is_direct(mr, is_write)) {
2099 l = memory_access_size(mr, l, addr);
2100 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002101 return false;
2102 }
2103 }
2104
2105 len -= l;
2106 addr += l;
2107 }
2108 return true;
2109}
2110
aliguori6d16c2f2009-01-22 16:59:11 +00002111/* Map a physical memory region into a host virtual address.
2112 * May map a subset of the requested range, given by and returned in *plen.
2113 * May return NULL if resources needed to perform the mapping are exhausted.
2114 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002115 * Use cpu_register_map_client() to know when retrying the map operation is
2116 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002117 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002118void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002119 hwaddr addr,
2120 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002121 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002122{
Avi Kivitya8170e52012-10-23 12:30:10 +02002123 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002124 hwaddr done = 0;
2125 hwaddr l, xlat, base;
2126 MemoryRegion *mr, *this_mr;
2127 ram_addr_t raddr;
aliguori6d16c2f2009-01-22 16:59:11 +00002128
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002129 if (len == 0) {
2130 return NULL;
2131 }
aliguori6d16c2f2009-01-22 16:59:11 +00002132
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002133 l = len;
2134 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2135 if (!memory_access_is_direct(mr, is_write)) {
2136 if (bounce.buffer) {
2137 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002138 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002139 /* Avoid unbounded allocations */
2140 l = MIN(l, TARGET_PAGE_SIZE);
2141 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002142 bounce.addr = addr;
2143 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002144
2145 memory_region_ref(mr);
2146 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002147 if (!is_write) {
2148 address_space_read(as, addr, bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002149 }
aliguori6d16c2f2009-01-22 16:59:11 +00002150
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002151 *plen = l;
2152 return bounce.buffer;
2153 }
2154
2155 base = xlat;
2156 raddr = memory_region_get_ram_addr(mr);
2157
2158 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00002159 len -= l;
2160 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002161 done += l;
2162 if (len == 0) {
2163 break;
2164 }
2165
2166 l = len;
2167 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2168 if (this_mr != mr || xlat != base + done) {
2169 break;
2170 }
aliguori6d16c2f2009-01-22 16:59:11 +00002171 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002172
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002173 memory_region_ref(mr);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002174 *plen = done;
2175 return qemu_ram_ptr_length(raddr + base, plen);
aliguori6d16c2f2009-01-22 16:59:11 +00002176}
2177
Avi Kivityac1970f2012-10-03 16:22:53 +02002178/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002179 * Will also mark the memory as dirty if is_write == 1. access_len gives
2180 * the amount of memory that was actually read or written by the caller.
2181 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002182void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2183 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002184{
2185 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002186 MemoryRegion *mr;
2187 ram_addr_t addr1;
2188
2189 mr = qemu_ram_addr_from_host(buffer, &addr1);
2190 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00002191 if (is_write) {
aliguori6d16c2f2009-01-22 16:59:11 +00002192 while (access_len) {
2193 unsigned l;
2194 l = TARGET_PAGE_SIZE;
2195 if (l > access_len)
2196 l = access_len;
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002197 invalidate_and_set_dirty(addr1, l);
aliguori6d16c2f2009-01-22 16:59:11 +00002198 addr1 += l;
2199 access_len -= l;
2200 }
2201 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002202 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002203 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002204 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002205 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00002206 return;
2207 }
2208 if (is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002209 address_space_write(as, bounce.addr, bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002210 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002211 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002212 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002213 memory_region_unref(bounce.mr);
aliguoriba223c22009-01-22 16:59:16 +00002214 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002215}
bellardd0ecd2a2006-04-23 17:14:48 +00002216
Avi Kivitya8170e52012-10-23 12:30:10 +02002217void *cpu_physical_memory_map(hwaddr addr,
2218 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002219 int is_write)
2220{
2221 return address_space_map(&address_space_memory, addr, plen, is_write);
2222}
2223
Avi Kivitya8170e52012-10-23 12:30:10 +02002224void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2225 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002226{
2227 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2228}
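/* Typical map/unmap pattern (an illustrative sketch, not copied from a caller
 * in this file); the mapped length must be re-checked and unmap must be told
 * how much was actually accessed so dirty tracking stays correct:
 *
 *     hwaddr len = size;
 *     void *p = cpu_physical_memory_map(addr, &len, 1);   // is_write == 1
 *     if (p) {
 *         memcpy(p, data, len);
 *         cpu_physical_memory_unmap(p, len, 1, len);
 *     }
 *
 * When the target is not direct RAM the mapping may use the single bounce
 * buffer above, so only one such mapping can be outstanding at a time;
 * cpu_register_map_client() reports when a failed map is worth retrying. */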
2229
bellard8df1cd02005-01-28 22:37:22 +00002230/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002231static inline uint32_t ldl_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002232 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002233{
bellard8df1cd02005-01-28 22:37:22 +00002234 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002235 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002236 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002237 hwaddr l = 4;
2238 hwaddr addr1;
bellard8df1cd02005-01-28 22:37:22 +00002239
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002240 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2241 false);
2242 if (l < 4 || !memory_access_is_direct(mr, false)) {
bellard8df1cd02005-01-28 22:37:22 +00002243 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002244 io_mem_read(mr, addr1, &val, 4);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002245#if defined(TARGET_WORDS_BIGENDIAN)
2246 if (endian == DEVICE_LITTLE_ENDIAN) {
2247 val = bswap32(val);
2248 }
2249#else
2250 if (endian == DEVICE_BIG_ENDIAN) {
2251 val = bswap32(val);
2252 }
2253#endif
bellard8df1cd02005-01-28 22:37:22 +00002254 } else {
2255 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002256 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002257 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002258 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002259 switch (endian) {
2260 case DEVICE_LITTLE_ENDIAN:
2261 val = ldl_le_p(ptr);
2262 break;
2263 case DEVICE_BIG_ENDIAN:
2264 val = ldl_be_p(ptr);
2265 break;
2266 default:
2267 val = ldl_p(ptr);
2268 break;
2269 }
bellard8df1cd02005-01-28 22:37:22 +00002270 }
2271 return val;
2272}
2273
Avi Kivitya8170e52012-10-23 12:30:10 +02002274uint32_t ldl_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002275{
2276 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2277}
2278
Avi Kivitya8170e52012-10-23 12:30:10 +02002279uint32_t ldl_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002280{
2281 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2282}
2283
Avi Kivitya8170e52012-10-23 12:30:10 +02002284uint32_t ldl_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002285{
2286 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2287}
2288
bellard84b7b8e2005-11-28 21:19:04 +00002289/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002290static inline uint64_t ldq_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002291 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00002292{
bellard84b7b8e2005-11-28 21:19:04 +00002293 uint8_t *ptr;
2294 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002295 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002296 hwaddr l = 8;
2297 hwaddr addr1;
bellard84b7b8e2005-11-28 21:19:04 +00002298
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002299 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2300 false);
2301 if (l < 8 || !memory_access_is_direct(mr, false)) {
bellard84b7b8e2005-11-28 21:19:04 +00002302 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002303 io_mem_read(mr, addr1, &val, 8);
Paolo Bonzini968a5622013-05-24 17:58:37 +02002304#if defined(TARGET_WORDS_BIGENDIAN)
2305 if (endian == DEVICE_LITTLE_ENDIAN) {
2306 val = bswap64(val);
2307 }
2308#else
2309 if (endian == DEVICE_BIG_ENDIAN) {
2310 val = bswap64(val);
2311 }
2312#endif
bellard84b7b8e2005-11-28 21:19:04 +00002313 } else {
2314 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002315 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002316 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002317 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002318 switch (endian) {
2319 case DEVICE_LITTLE_ENDIAN:
2320 val = ldq_le_p(ptr);
2321 break;
2322 case DEVICE_BIG_ENDIAN:
2323 val = ldq_be_p(ptr);
2324 break;
2325 default:
2326 val = ldq_p(ptr);
2327 break;
2328 }
bellard84b7b8e2005-11-28 21:19:04 +00002329 }
2330 return val;
2331}
2332
Avi Kivitya8170e52012-10-23 12:30:10 +02002333uint64_t ldq_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002334{
2335 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2336}
2337
Avi Kivitya8170e52012-10-23 12:30:10 +02002338uint64_t ldq_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002339{
2340 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2341}
2342
Avi Kivitya8170e52012-10-23 12:30:10 +02002343uint64_t ldq_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002344{
2345 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2346}
2347
bellardaab33092005-10-30 20:48:42 +00002348/* XXX: optimize */
Avi Kivitya8170e52012-10-23 12:30:10 +02002349uint32_t ldub_phys(hwaddr addr)
bellardaab33092005-10-30 20:48:42 +00002350{
2351 uint8_t val;
2352 cpu_physical_memory_read(addr, &val, 1);
2353 return val;
2354}
2355
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002356/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002357static inline uint32_t lduw_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002358 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00002359{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002360 uint8_t *ptr;
2361 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002362 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002363 hwaddr l = 2;
2364 hwaddr addr1;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002365
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002366 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2367 false);
2368 if (l < 2 || !memory_access_is_direct(mr, false)) {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002369 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002370 io_mem_read(mr, addr1, &val, 2);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002371#if defined(TARGET_WORDS_BIGENDIAN)
2372 if (endian == DEVICE_LITTLE_ENDIAN) {
2373 val = bswap16(val);
2374 }
2375#else
2376 if (endian == DEVICE_BIG_ENDIAN) {
2377 val = bswap16(val);
2378 }
2379#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002380 } else {
2381 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002382 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002383 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002384 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002385 switch (endian) {
2386 case DEVICE_LITTLE_ENDIAN:
2387 val = lduw_le_p(ptr);
2388 break;
2389 case DEVICE_BIG_ENDIAN:
2390 val = lduw_be_p(ptr);
2391 break;
2392 default:
2393 val = lduw_p(ptr);
2394 break;
2395 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002396 }
2397 return val;
bellardaab33092005-10-30 20:48:42 +00002398}
2399
Avi Kivitya8170e52012-10-23 12:30:10 +02002400uint32_t lduw_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002401{
2402 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2403}
2404
Avi Kivitya8170e52012-10-23 12:30:10 +02002405uint32_t lduw_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002406{
2407 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2408}
2409
Avi Kivitya8170e52012-10-23 12:30:10 +02002410uint32_t lduw_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002411{
2412 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2413}
2414
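/* Usage sketch (hypothetical, not part of the original source): the "addr must
 * be aligned" warnings mean these fixed-size helpers assume the access does
 * not cross a page or memory-region boundary, which an unaligned address can
 * cause.  For unaligned or arbitrary-length data, cpu_physical_memory_read()
 * copies raw bytes instead, with no byte swapping.  "pa" is an assumed
 * address. */
static uint32_t example_read_u16(hwaddr pa)
{
    uint16_t raw;

    if (pa & 1) {
        /* byte-wise fallback; the result is the guest's in-memory byte
         * layout, not adjusted for endianness */
        cpu_physical_memory_read(pa, &raw, 2);
        return raw;
    }
    return lduw_phys(pa);
}
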
bellard8df1cd02005-01-28 22:37:22 +00002415/* warning: addr must be aligned. The ram page is not masked as dirty
2416 and the code inside is not invalidated. It is useful if the dirty
2417 bits are used to track modified PTEs */
Avi Kivitya8170e52012-10-23 12:30:10 +02002418void stl_phys_notdirty(hwaddr addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00002419{
bellard8df1cd02005-01-28 22:37:22 +00002420 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002421 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002422 hwaddr l = 4;
2423 hwaddr addr1;
bellard8df1cd02005-01-28 22:37:22 +00002424
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002425 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2426 true);
2427 if (l < 4 || !memory_access_is_direct(mr, true)) {
2428 io_mem_write(mr, addr1, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00002429 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002430 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00002431 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00002432 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00002433
2434 if (unlikely(in_migration)) {
2435 if (!cpu_physical_memory_is_dirty(addr1)) {
2436 /* invalidate code */
2437 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2438 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002439 cpu_physical_memory_set_dirty_flags(
2440 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori74576192008-10-06 14:02:03 +00002441 }
2442 }
bellard8df1cd02005-01-28 22:37:22 +00002443 }
2444}
2445
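/* Usage sketch (hypothetical, not part of the original source): the intended
 * caller of stl_phys_notdirty() is target code that updates guest page-table
 * entries, e.g. setting an accessed/dirty bit during a software page walk.
 * Skipping the dirty marking keeps the dirty bitmap meaningful when it is
 * being used to detect exactly such PTE modifications.  The address and bit
 * mask below are assumed values. */
static void example_set_pte_accessed_bit(hwaddr pte_pa)
{
    uint32_t pte = ldl_phys(pte_pa);

    pte |= 0x20;                     /* assumed "accessed" bit in the PTE */
    stl_phys_notdirty(pte_pa, pte);  /* write back without dirtying the page */
}
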
2446/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002447static inline void stl_phys_internal(hwaddr addr, uint32_t val,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002448 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002449{
bellard8df1cd02005-01-28 22:37:22 +00002450 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002451 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002452 hwaddr l = 4;
2453 hwaddr addr1;
bellard8df1cd02005-01-28 22:37:22 +00002454
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002455 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2456 true);
2457 if (l < 4 || !memory_access_is_direct(mr, true)) {
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002458#if defined(TARGET_WORDS_BIGENDIAN)
2459 if (endian == DEVICE_LITTLE_ENDIAN) {
2460 val = bswap32(val);
2461 }
2462#else
2463 if (endian == DEVICE_BIG_ENDIAN) {
2464 val = bswap32(val);
2465 }
2466#endif
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002467 io_mem_write(mr, addr1, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00002468 } else {
bellard8df1cd02005-01-28 22:37:22 +00002469 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002470 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00002471 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002472 switch (endian) {
2473 case DEVICE_LITTLE_ENDIAN:
2474 stl_le_p(ptr, val);
2475 break;
2476 case DEVICE_BIG_ENDIAN:
2477 stl_be_p(ptr, val);
2478 break;
2479 default:
2480 stl_p(ptr, val);
2481 break;
2482 }
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002483 invalidate_and_set_dirty(addr1, 4);
bellard8df1cd02005-01-28 22:37:22 +00002484 }
2485}
2486
Avi Kivitya8170e52012-10-23 12:30:10 +02002487void stl_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002488{
2489 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2490}
2491
Avi Kivitya8170e52012-10-23 12:30:10 +02002492void stl_le_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002493{
2494 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2495}
2496
Avi Kivitya8170e52012-10-23 12:30:10 +02002497void stl_be_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002498{
2499 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2500}
2501
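/* Usage sketch (hypothetical, not part of the original source): unlike
 * stl_phys_notdirty() above, these stores go through
 * invalidate_and_set_dirty(), so a store into RAM invalidates any translated
 * code on that page and shows up in dirty tracking.  "status_pa" and the
 * status value are assumed. */
static void example_post_completion_status(hwaddr status_pa)
{
    stl_le_phys(status_pa, 1);   /* device-defined little-endian status word */
}
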
bellardaab33092005-10-30 20:48:42 +00002502/* XXX: optimize */
Avi Kivitya8170e52012-10-23 12:30:10 +02002503void stb_phys(hwaddr addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00002504{
2505 uint8_t v = val;
2506 cpu_physical_memory_write(addr, &v, 1);
2507}
2508
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002509/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002510static inline void stw_phys_internal(hwaddr addr, uint32_t val,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002511 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00002512{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002513 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002514 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002515 hwaddr l = 2;
2516 hwaddr addr1;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002517
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002518 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2519 true);
2520 if (l < 2 || !memory_access_is_direct(mr, true)) {
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002521#if defined(TARGET_WORDS_BIGENDIAN)
2522 if (endian == DEVICE_LITTLE_ENDIAN) {
2523 val = bswap16(val);
2524 }
2525#else
2526 if (endian == DEVICE_BIG_ENDIAN) {
2527 val = bswap16(val);
2528 }
2529#endif
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002530 io_mem_write(mr, addr1, val, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002531 } else {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002532 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002533 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002534 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002535 switch (endian) {
2536 case DEVICE_LITTLE_ENDIAN:
2537 stw_le_p(ptr, val);
2538 break;
2539 case DEVICE_BIG_ENDIAN:
2540 stw_be_p(ptr, val);
2541 break;
2542 default:
2543 stw_p(ptr, val);
2544 break;
2545 }
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002546 invalidate_and_set_dirty(addr1, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002547 }
bellardaab33092005-10-30 20:48:42 +00002548}
2549
Avi Kivitya8170e52012-10-23 12:30:10 +02002550void stw_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002551{
2552 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2553}
2554
Avi Kivitya8170e52012-10-23 12:30:10 +02002555void stw_le_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002556{
2557 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2558}
2559
Avi Kivitya8170e52012-10-23 12:30:10 +02002560void stw_be_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002561{
2562 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2563}
2564
bellardaab33092005-10-30 20:48:42 +00002565/* XXX: optimize */
Avi Kivitya8170e52012-10-23 12:30:10 +02002566void stq_phys(hwaddr addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00002567{
2568 val = tswap64(val);
Stefan Weil71d2b722011-03-26 21:06:56 +01002569 cpu_physical_memory_write(addr, &val, 8);
bellardaab33092005-10-30 20:48:42 +00002570}
2571
Avi Kivitya8170e52012-10-23 12:30:10 +02002572void stq_le_phys(hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002573{
2574 val = cpu_to_le64(val);
2575 cpu_physical_memory_write(addr, &val, 8);
2576}
2577
Avi Kivitya8170e52012-10-23 12:30:10 +02002578void stq_be_phys(hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002579{
2580 val = cpu_to_be64(val);
2581 cpu_physical_memory_write(addr, &val, 8);
2582}
2583
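/* Usage sketch (hypothetical, not part of the original source): the stq_*
 * helpers above byte-swap the value first (tswap64/cpu_to_le64/cpu_to_be64)
 * and then write it as a plain byte buffer, so unlike ldq_phys_internal()
 * they do not guarantee a single 8-byte MMIO transaction.  "gpa" is an
 * assumed guest physical address. */
static void example_store_u64_le(hwaddr gpa, uint64_t value)
{
    stq_le_phys(gpa, value);
}
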
aliguori5e2972f2009-03-28 17:51:36 +00002584/* virtual memory access for debug (includes writing to ROM) */
Andreas Färberf17ec442013-06-29 19:40:58 +02002585int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00002586 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002587{
2588 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02002589 hwaddr phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00002590 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00002591
2592 while (len > 0) {
2593 page = addr & TARGET_PAGE_MASK;
Andreas Färberf17ec442013-06-29 19:40:58 +02002594 phys_addr = cpu_get_phys_page_debug(cpu, page);
bellard13eb76e2004-01-24 15:23:36 +00002595 /* if no physical page mapped, return an error */
2596 if (phys_addr == -1)
2597 return -1;
2598 l = (page + TARGET_PAGE_SIZE) - addr;
2599 if (l > len)
2600 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00002601 phys_addr += (addr & ~TARGET_PAGE_MASK);
aliguori5e2972f2009-03-28 17:51:36 +00002602 if (is_write)
2603 cpu_physical_memory_write_rom(phys_addr, buf, l);
2604 else
aliguori5e2972f2009-03-28 17:51:36 +00002605 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
bellard13eb76e2004-01-24 15:23:36 +00002606 len -= l;
2607 buf += l;
2608 addr += l;
2609 }
2610 return 0;
2611}
Paul Brooka68fe892010-03-01 00:08:59 +00002612#endif
bellard13eb76e2004-01-24 15:23:36 +00002613
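/* Usage sketch (hypothetical, not part of the original source): the kind of
 * call a debugger front end (gdbstub, monitor) makes.  The access is resolved
 * page by page through cpu_get_phys_page_debug(), and writes may touch ROM.
 * The buffer and length are assumed. */
static int example_peek_guest_virtual(CPUState *cpu, target_ulong vaddr,
                                      uint8_t *buf, int len)
{
    /* is_write == 0: read guest-virtual memory into buf; returns -1 if any
     * page in the range is unmapped. */
    return cpu_memory_rw_debug(cpu, vaddr, buf, len, 0);
}
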
Blue Swirl8e4a4242013-01-06 18:30:17 +00002614#if !defined(CONFIG_USER_ONLY)
2615
2616/*
2617 * A helper function for the _utterly broken_ virtio device model to find out if
2618 * it's running on a big endian machine. Don't do this at home kids!
2619 */
2620bool virtio_is_big_endian(void);
2621bool virtio_is_big_endian(void)
2622{
2623#if defined(TARGET_WORDS_BIGENDIAN)
2624 return true;
2625#else
2626 return false;
2627#endif
2628}
2629
2630#endif
2631
Wen Congyang76f35532012-05-07 12:04:18 +08002632#ifndef CONFIG_USER_ONLY
Avi Kivitya8170e52012-10-23 12:30:10 +02002633bool cpu_physical_memory_is_io(hwaddr phys_addr)
Wen Congyang76f35532012-05-07 12:04:18 +08002634{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002635    MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002636 hwaddr l = 1;
Wen Congyang76f35532012-05-07 12:04:18 +08002637
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002638 mr = address_space_translate(&address_space_memory,
2639 phys_addr, &phys_addr, &l, false);
Wen Congyang76f35532012-05-07 12:04:18 +08002640
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002641 return !(memory_region_is_ram(mr) ||
2642 memory_region_is_romd(mr));
Wen Congyang76f35532012-05-07 12:04:18 +08002643}
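
/* Usage sketch (hypothetical, not part of the original source): callers such
 * as memory-dump code use this predicate to skip guest physical addresses
 * that are backed by MMIO rather than RAM or a ROM device.  "gpa" is an
 * assumed address. */
static bool example_should_dump_page(hwaddr gpa)
{
    return !cpu_physical_memory_is_io(gpa);
}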
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04002644
2645void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2646{
2647 RAMBlock *block;
2648
2649 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
2650 func(block->host, block->offset, block->length, opaque);
2651 }
2652}
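
/* Usage sketch (hypothetical, not part of the original source): the callback
 * signature is inferred from the call above -- each block's host mapping, its
 * offset in ram_addr_t space and its length.  Registering or pinning every
 * RAM block is the kind of use this iterator serves; both functions below are
 * assumed names. */
static void example_account_block(void *host_addr, ram_addr_t offset,
                                  ram_addr_t length, void *opaque)
{
    size_t *total = opaque;

    /* e.g. register [host_addr, host_addr + length) with an RDMA layer */
    *total += length;
}

static size_t example_total_ram_bytes(void)
{
    size_t total = 0;

    qemu_ram_foreach_block(example_account_block, &total);
    return total;
}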
Peter Maydellec3f8c92013-06-27 20:53:38 +01002653#endif