/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

typedef PhysPageEntry Node[L2_SIZE];

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    Node *nodes;
    MemoryRegionSection *sections;
    AddressSpace *as;
};
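/* Illustrative walk of the map above (the concrete values are
 * assumptions for the example: L2_BITS == 10 and TARGET_PAGE_BITS == 12;
 * both are configuration-dependent):
 *
 *     hwaddr addr  = 0x12345678;
 *     hwaddr index = addr >> TARGET_PAGE_BITS;   // 0x12345
 *
 * Lookup then consumes 'index' L2_BITS at a time, most significant
 * chunk first, following PhysPageEntry.ptr through 'nodes' until an
 * is_leaf entry is reached; that entry's ptr indexes 'sections'.  A
 * PHYS_MAP_NODE_NIL pointer on the way down means the page is
 * unassigned.
 */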

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;
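/* A subpage carves a single target page up among several sections.
 * SUBPAGE_IDX() is just the byte offset within the page: with 4K
 * pages (an assumption for the example), SUBPAGE_IDX(0x1234) == 0x234,
 * so sub_section[] maps every byte of the page to the index of the
 * section covering it.
 */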

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

static PhysPageMap *prev_map;
static PhysPageMap next_map;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (next_map.nodes_nb + nodes > next_map.nodes_nb_alloc) {
        next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc * 2,
                                      16);
        next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc,
                                      next_map.nodes_nb + nodes);
        next_map.nodes = g_renew(Node, next_map.nodes,
                                 next_map.nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = next_map.nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != next_map.nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        next_map.nodes[ret][i].is_leaf = 0;
        next_map.nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = next_map.nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = next_map.nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}
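/* Sketch of how the recursion above fills the map: at 'level', one
 * entry spans 'step' pages.  Whenever the remaining [*index,
 * *index + *nb) range is step-aligned and at least 'step' pages long,
 * the entry becomes a leaf pointing straight at 'leaf' (in effect a
 * superpage); otherwise the function recurses one level down and
 * covers the partial range page by page.
 */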

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr index,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    int i;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }
    return &sections[lp.ptr];
}
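/* Unlike phys_page_set(), phys_page_find() above is a pure lookup: it
 * never allocates nodes, and holes in the map simply resolve to
 * &sections[PHYS_SECTION_UNASSIGNED].  Note that 'index' is a page
 * number (addr >> TARGET_PAGE_BITS), not a byte address.
 */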

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr >> TARGET_PAGE_BITS,
                             d->nodes, d->sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}

MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    *plen = len;
    *xlat = addr;
    return mr;
}
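/* Typical caller loop, in the style of address_space_rw(): a long
 * guest-physical range is translated piece by piece, because each
 * call resolves at most one MemoryRegion:
 *
 *     hwaddr l, addr1;
 *     MemoryRegion *mr;
 *
 *     while (len > 0) {
 *         l = len;
 *         mr = address_space_translate(as, addr, &addr1, &l, is_write);
 *         // ... access at most 'l' bytes of 'mr' at offset 'addr1' ...
 *         len -= l;
 *         addr += l;
 *     }
 */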

MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(cpu, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
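/* Example, following the gdbstub's usage: watch 4 bytes at 'addr' for
 * writes, subject to the checks above (power-of-2 length, aligned
 * address, at most one page):
 *
 *     cpu_watchpoint_insert(env, addr, 4, BP_GDB | BP_MEM_WRITE, NULL);
 */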

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
    }

    breakpoint_invalidate(ENV_GET_CPU(env), pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
#else
    return -ENOSYS;
#endif
}
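/* Example, again following the gdbstub: set and later clear a
 * debugger breakpoint at 'pc':
 *
 *     cpu_breakpoint_insert(env, pc, BP_GDB, NULL);
 *     ...
 *     cpu_breakpoint_remove(env, pc, BP_GDB);
 */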

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(ENV_GET_CPU(env), breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
#endif
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    CPUState *cpu = ENV_GET_CPU(env);
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    RAMBlock *block;
    ram_addr_t start1;

    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)block->host + (start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

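/* The iotlb value returned below encodes one of two cases: for RAM,
 * the page-aligned ram_addr_t of the page with a PHYS_SECTION_* index
 * OR-ed into the low bits (safe because phys_section_add() keeps
 * section numbers below TARGET_PAGE_SIZE); for everything else, the
 * section's index in the dispatch table plus 'xlat'.
 */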
hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - address_space_memory.dispatch->sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* !defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(next_map.sections_nb < TARGET_PAGE_SIZE);

    if (next_map.sections_nb == next_map.sections_nb_alloc) {
        next_map.sections_nb_alloc = MAX(next_map.sections_nb_alloc * 2,
                                         16);
        next_map.sections = g_renew(MemoryRegionSection, next_map.sections,
                                    next_map.sections_nb_alloc);
    }
    next_map.sections[next_map.sections_nb] = *section;
    memory_region_ref(section->mr);
    return next_map.sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
    g_free(map);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base >> TARGET_PAGE_BITS,
                                                   next_map.nodes, next_map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}

static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}
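/* Worked example for mem_add() above, assuming 4K target pages: a
 * section covering [0x800, 0x2800) is registered as a head subpage
 * for [0x800, 0x1000), a full page [0x1000, 0x2000) via
 * register_multipage(), and a tail subpage for [0x2000, 0x2800).
 */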

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    block->fd = fd;
    return area;
}
#else
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    fprintf(stderr, "-mem-path not supported on this host\n");
    exit(1);
}
#endif

static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
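/* Example for find_ram_offset() above: with blocks at [0x0, 0x8000)
 * and [0x10000, 0x20000), a request for 0x4000 bytes sees the gaps
 * [0x8000, 0x10000) and [0x20000, ...); the first is the smallest
 * that fits, so 0x8000 is returned.
 */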

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!qemu_opt_get_bool(qemu_get_machine_opts(),
                           "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->fd = -1;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else if (xen_enabled()) {
        if (mem_path) {
            fprintf(stderr, "-mem-path not supported with Xen\n");
            exit(1);
        }
        xen_ram_alloc(new_block->offset, size, mr);
    } else {
        if (mem_path) {
            if (phys_mem_alloc != qemu_anon_ram_alloc) {
                /*
                 * file_ram_alloc() needs to allocate just like
                 * phys_mem_alloc, but we haven't bothered to provide
                 * a hook there.
                 */
                fprintf(stderr,
                        "-mem-path not supported with this accelerator\n");
                exit(1);
            }
            new_block->host = file_ram_alloc(new_block, size, mem_path);
        }
        if (!new_block->host) {
            new_block->host = phys_mem_alloc(size);
            if (!new_block->host) {
                fprintf(stderr, "Cannot set up guest memory '%s': %s\n",
                        new_block->mr->name, strerror(errno));
                exit(1);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    /* Keep the list sorted from biggest to smallest block.  */
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->length < new_block->length) {
            break;
        }
    }
    if (block) {
        QTAILQ_INSERT_BEFORE(block, new_block, next);
    } else {
        QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
    qemu_madvise(new_block->host, size, QEMU_MADV_DONTFORK);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}
bellarde9a1ab12007-02-08 23:08:38 +00001183
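/*
 * Illustrative sketch, not part of the original file: device code does
 * not normally call qemu_ram_alloc() directly.  It goes through
 * memory_region_init_ram(), which allocates the backing RAMBlock and
 * ties it to the MemoryRegion.  The region name and size below are
 * made up for the example.
 */
static G_GNUC_UNUSED void example_vram_init(MemoryRegion *vram, Object *owner)
{
    memory_region_init_ram(vram, owner, "example.vram", 8 * 1024 * 1024);
    /* The block's offset is now reachable via
       memory_region_get_ram_addr(vram). */
}
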
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001184void qemu_ram_free_from_ptr(ram_addr_t addr)
1185{
1186 RAMBlock *block;
1187
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001188 /* This assumes the iothread lock is taken here too. */
1189 qemu_mutex_lock_ramlist();
Paolo Bonzinia3161032012-11-14 15:54:48 +01001190 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001191 if (addr == block->offset) {
Paolo Bonzinia3161032012-11-14 15:54:48 +01001192 QTAILQ_REMOVE(&ram_list.blocks, block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001193 ram_list.mru_block = NULL;
Umesh Deshpandef798b072011-08-18 11:41:17 -07001194 ram_list.version++;
Anthony Liguori7267c092011-08-20 22:09:37 -05001195 g_free(block);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001196 break;
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001197 }
1198 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001199 qemu_mutex_unlock_ramlist();
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001200}
1201
Anthony Liguoric227f092009-10-01 16:12:16 -05001202void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00001203{
Alex Williamson04b16652010-07-02 11:13:17 -06001204 RAMBlock *block;
1205
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001206 /* This assumes the iothread lock is taken here too. */
1207 qemu_mutex_lock_ramlist();
Paolo Bonzinia3161032012-11-14 15:54:48 +01001208 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001209 if (addr == block->offset) {
Paolo Bonzinia3161032012-11-14 15:54:48 +01001210 QTAILQ_REMOVE(&ram_list.blocks, block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001211 ram_list.mru_block = NULL;
Umesh Deshpandef798b072011-08-18 11:41:17 -07001212 ram_list.version++;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001213 if (block->flags & RAM_PREALLOC_MASK) {
1214 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001215 } else if (xen_enabled()) {
1216 xen_invalidate_map_cache_entry(block->host);
Stefan Weil089f3f72013-09-18 07:48:15 +02001217#ifndef _WIN32
Markus Armbruster3435f392013-07-31 15:11:07 +02001218 } else if (block->fd >= 0) {
1219 munmap(block->host, block->length);
1220 close(block->fd);
Stefan Weil089f3f72013-09-18 07:48:15 +02001221#endif
Alex Williamson04b16652010-07-02 11:13:17 -06001222 } else {
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001223 qemu_anon_ram_free(block->host, block->length);
Alex Williamson04b16652010-07-02 11:13:17 -06001224 }
Anthony Liguori7267c092011-08-20 22:09:37 -05001225 g_free(block);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001226 break;
Alex Williamson04b16652010-07-02 11:13:17 -06001227 }
1228 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001229 qemu_mutex_unlock_ramlist();
Alex Williamson04b16652010-07-02 11:13:17 -06001230
bellarde9a1ab12007-02-08 23:08:38 +00001231}
1232
Huang Yingcd19cfa2011-03-02 08:56:19 +01001233#ifndef _WIN32
1234void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1235{
1236 RAMBlock *block;
1237 ram_addr_t offset;
1238 int flags;
1239 void *area, *vaddr;
1240
Paolo Bonzinia3161032012-11-14 15:54:48 +01001241 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001242 offset = addr - block->offset;
1243 if (offset < block->length) {
1244 vaddr = block->host + offset;
1245 if (block->flags & RAM_PREALLOC_MASK) {
1246 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001247 } else if (xen_enabled()) {
1248 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001249 } else {
1250 flags = MAP_FIXED;
1251 munmap(vaddr, length);
Markus Armbruster3435f392013-07-31 15:11:07 +02001252 if (block->fd >= 0) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001253#ifdef MAP_POPULATE
Markus Armbruster3435f392013-07-31 15:11:07 +02001254 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1255 MAP_PRIVATE;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001256#else
Markus Armbruster3435f392013-07-31 15:11:07 +02001257 flags |= MAP_PRIVATE;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001258#endif
Markus Armbruster3435f392013-07-31 15:11:07 +02001259 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1260 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001261 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001262 /*
1263 * Remap needs to match alloc. Accelerators that
1264 * set phys_mem_alloc never remap. If they did,
1265 * we'd need a remap hook here.
1266 */
1267 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1268
Huang Yingcd19cfa2011-03-02 08:56:19 +01001269 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1270 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1271 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001272 }
1273 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001274 fprintf(stderr, "Could not remap addr: "
1275 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001276 length, addr);
1277 exit(1);
1278 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001279 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001280 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001281 }
1282 return;
1283 }
1284 }
1285}
1286#endif /* !_WIN32 */
1287
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001288/* Return a host pointer to ram allocated with qemu_ram_alloc.
1289 With the exception of the softmmu code in this file, this should
1290 only be used for local memory (e.g. video ram) that the device owns,
1291 and knows it isn't going to access beyond the end of the block.
1292
1293 It should not be used for general purpose DMA.
1294 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1295 */
1296void *qemu_get_ram_ptr(ram_addr_t addr)
1297{
1298 RAMBlock *block = qemu_get_ram_block(addr);
1299
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001300 if (xen_enabled()) {
1301 /* We need to check if the requested address is in the RAM
1302 * because we don't want to map the entire memory in QEMU.
1303 * In that case just map until the end of the page.
1304 */
1305 if (block->offset == 0) {
1306 return xen_map_cache(addr, 0, 0);
1307 } else if (block->host == NULL) {
1308 block->host =
1309 xen_map_cache(block->offset, block->length, 1);
1310 }
1311 }
1312 return block->host + (addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00001313}
1314
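/*
 * Illustrative sketch, not part of the original file: per the warning
 * above, qemu_get_ram_ptr() is only for memory the caller owns, e.g. a
 * display device scanning out of its own video RAM.  General-purpose
 * DMA must use cpu_physical_memory_rw()/cpu_physical_memory_map()
 * instead.  'vram_offset' is a made-up ram_addr_t saved at allocation
 * time.
 */
static G_GNUC_UNUSED uint32_t example_peek_own_vram(ram_addr_t vram_offset)
{
    uint8_t *p = qemu_get_ram_ptr(vram_offset);

    return ldl_p(p);   /* direct host access is safe within our own block */
}
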
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001315/* Return a host pointer to guest RAM. Similar to qemu_get_ram_ptr()
1316 * but takes a size argument. */
Peter Maydellcb85f7a2013-07-08 09:44:04 +01001317static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001318{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001319 if (*size == 0) {
1320 return NULL;
1321 }
Jan Kiszka868bb332011-06-21 22:59:09 +02001322 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001323 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02001324 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001325 RAMBlock *block;
1326
Paolo Bonzinia3161032012-11-14 15:54:48 +01001327 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001328 if (addr - block->offset < block->length) {
1329 if (addr - block->offset + *size > block->length)
1330 *size = block->length - addr + block->offset;
1331 return block->host + (addr - block->offset);
1332 }
1333 }
1334
1335 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1336 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001337 }
1338}
1339
Paolo Bonzini7443b432013-06-03 12:44:02 +02001340/* Some of the softmmu routines need to translate from a host pointer
1341 (typically a TLB entry) back to a ram offset. */
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001342MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00001343{
pbrook94a6b542009-04-11 17:15:54 +00001344 RAMBlock *block;
1345 uint8_t *host = ptr;
1346
Jan Kiszka868bb332011-06-21 22:59:09 +02001347 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001348 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001349 return qemu_get_ram_block(*ram_addr)->mr;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001350 }
1351
Paolo Bonzini23887b72013-05-06 14:28:39 +02001352 block = ram_list.mru_block;
1353 if (block && block->host && host - block->host < block->length) {
1354 goto found;
1355 }
1356
Paolo Bonzinia3161032012-11-14 15:54:48 +01001357 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001358 /* This case happens when the block is not mapped. */
1359 if (block->host == NULL) {
1360 continue;
1361 }
Alex Williamsonf471a172010-06-11 11:11:42 -06001362 if (host - block->host < block->length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001363 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001364 }
pbrook94a6b542009-04-11 17:15:54 +00001365 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001366
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001367 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001368
1369found:
1370 *ram_addr = block->offset + (host - block->host);
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001371 return block->mr;
Marcelo Tosattie8902612010-10-11 15:31:19 -03001372}
Alex Williamsonf471a172010-06-11 11:11:42 -06001373
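/*
 * Illustrative sketch, not part of the original file: the lookup above
 * is the inverse of qemu_get_ram_ptr(), so a round trip through both
 * returns the original offset, assuming 'addr' lies within a mapped
 * RAMBlock.
 */
static G_GNUC_UNUSED bool example_ram_addr_round_trip(ram_addr_t addr)
{
    void *host = qemu_get_ram_ptr(addr);
    ram_addr_t back;

    if (!qemu_ram_addr_from_host(host, &back)) {
        return false;   /* host pointer not inside any RAMBlock */
    }
    return back == addr;
}
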
Avi Kivitya8170e52012-10-23 12:30:10 +02001374static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001375 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001376{
bellard3a7d9292005-08-21 09:26:42 +00001377 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001378 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00001379 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001380 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001381 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00001382 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001383 switch (size) {
1384 case 1:
1385 stb_p(qemu_get_ram_ptr(ram_addr), val);
1386 break;
1387 case 2:
1388 stw_p(qemu_get_ram_ptr(ram_addr), val);
1389 break;
1390 case 4:
1391 stl_p(qemu_get_ram_ptr(ram_addr), val);
1392 break;
1393 default:
1394 abort();
1395 }
bellardf23db162005-08-21 19:12:28 +00001396 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001397 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00001398 /* we remove the notdirty callback only if the code has been
1399 flushed */
Andreas Färber4917cf42013-05-27 05:17:50 +02001400 if (dirty_flags == 0xff) {
1401 CPUArchState *env = current_cpu->env_ptr;
1402 tlb_set_dirty(env, env->mem_io_vaddr);
1403 }
bellard1ccde1c2004-02-06 19:46:14 +00001404}
1405
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001406static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1407 unsigned size, bool is_write)
1408{
1409 return is_write;
1410}
1411
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001412static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001413 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001414 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001415 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00001416};
1417
pbrook0f459d12008-06-09 00:20:13 +00001418/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00001419static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00001420{
Andreas Färber4917cf42013-05-27 05:17:50 +02001421 CPUArchState *env = current_cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00001422 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00001423 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00001424 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00001425 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00001426
aliguori06d55cc2008-11-18 20:24:06 +00001427 if (env->watchpoint_hit) {
1428 /* We re-entered the check after replacing the TB. Now raise
1429 * the debug interrupt so that it will trigger after the
1430 * current instruction. */
Andreas Färberc3affe52013-01-18 15:03:43 +01001431 cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00001432 return;
1433 }
pbrook2e70f6e2008-06-29 01:03:05 +00001434 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00001435 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001436 if ((vaddr == (wp->vaddr & len_mask) ||
1437 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00001438 wp->flags |= BP_WATCHPOINT_HIT;
1439 if (!env->watchpoint_hit) {
1440 env->watchpoint_hit = wp;
Blue Swirl5a316522012-12-02 21:28:09 +00001441 tb_check_watchpoint(env);
aliguori6e140f22008-11-18 20:37:55 +00001442 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1443 env->exception_index = EXCP_DEBUG;
Max Filippov488d6572012-01-29 02:24:39 +04001444 cpu_loop_exit(env);
aliguori6e140f22008-11-18 20:37:55 +00001445 } else {
1446 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1447 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
Max Filippov488d6572012-01-29 02:24:39 +04001448 cpu_resume_from_signal(env, NULL);
aliguori6e140f22008-11-18 20:37:55 +00001449 }
aliguori06d55cc2008-11-18 20:24:06 +00001450 }
aliguori6e140f22008-11-18 20:37:55 +00001451 } else {
1452 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00001453 }
1454 }
1455}
1456
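/*
 * Worked example, added for illustration of the matching condition in
 * check_watchpoint() above: for an 8-byte watchpoint at vaddr 0x1000,
 * wp->len_mask == ~7, so a 4-byte access at 0x1004 (len_mask == ~3)
 * matches the second clause because (0x1004 & ~7) == 0x1000.
 * Conversely, a wide access starting below a watched byte, e.g. a
 * 4-byte access at 0xffc with a 1-byte watchpoint at 0xffe, matches the
 * first clause since (0xffe & ~3) == 0xffc.
 */
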
pbrook6658ffb2007-03-16 23:58:11 +00001457/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1458 so these check for a hit then pass through to the normal out-of-line
1459 phys routines. */
Avi Kivitya8170e52012-10-23 12:30:10 +02001460static uint64_t watch_mem_read(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001461 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001462{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001463 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1464 switch (size) {
1465 case 1: return ldub_phys(addr);
1466 case 2: return lduw_phys(addr);
1467 case 4: return ldl_phys(addr);
1468 default: abort();
1469 }
pbrook6658ffb2007-03-16 23:58:11 +00001470}
1471
Avi Kivitya8170e52012-10-23 12:30:10 +02001472static void watch_mem_write(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001473 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001474{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001475 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1476 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04001477 case 1:
1478 stb_phys(addr, val);
1479 break;
1480 case 2:
1481 stw_phys(addr, val);
1482 break;
1483 case 4:
1484 stl_phys(addr, val);
1485 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02001486 default: abort();
1487 }
pbrook6658ffb2007-03-16 23:58:11 +00001488}
1489
Avi Kivity1ec9b902012-01-02 12:47:48 +02001490static const MemoryRegionOps watch_mem_ops = {
1491 .read = watch_mem_read,
1492 .write = watch_mem_write,
1493 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00001494};
pbrook6658ffb2007-03-16 23:58:11 +00001495
Avi Kivitya8170e52012-10-23 12:30:10 +02001496static uint64_t subpage_read(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001497 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001498{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001499 subpage_t *subpage = opaque;
1500 uint8_t buf[4];
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001501
blueswir1db7b5422007-05-26 17:36:03 +00001502#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001503 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001504 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00001505#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001506 address_space_read(subpage->as, addr + subpage->base, buf, len);
1507 switch (len) {
1508 case 1:
1509 return ldub_p(buf);
1510 case 2:
1511 return lduw_p(buf);
1512 case 4:
1513 return ldl_p(buf);
1514 default:
1515 abort();
1516 }
blueswir1db7b5422007-05-26 17:36:03 +00001517}
1518
Avi Kivitya8170e52012-10-23 12:30:10 +02001519static void subpage_write(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001520 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001521{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001522 subpage_t *subpage = opaque;
1523 uint8_t buf[4];
1524
blueswir1db7b5422007-05-26 17:36:03 +00001525#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001526 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001527 " value %"PRIx64"\n",
1528 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00001529#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001530 switch (len) {
1531 case 1:
1532 stb_p(buf, value);
1533 break;
1534 case 2:
1535 stw_p(buf, value);
1536 break;
1537 case 4:
1538 stl_p(buf, value);
1539 break;
1540 default:
1541 abort();
1542 }
1543 address_space_write(subpage->as, addr + subpage->base, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00001544}
1545
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001546static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08001547 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001548{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001549 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001550#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001551 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001552 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001553#endif
1554
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001555 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08001556 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001557}
1558
Avi Kivity70c68e42012-01-02 12:32:48 +02001559static const MemoryRegionOps subpage_ops = {
1560 .read = subpage_read,
1561 .write = subpage_write,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001562 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02001563 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00001564};
1565
Anthony Liguoric227f092009-10-01 16:12:16 -05001566static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02001567 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00001568{
1569 int idx, eidx;
1570
1571 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1572 return -1;
1573 idx = SUBPAGE_IDX(start);
1574 eidx = SUBPAGE_IDX(end);
1575#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001576 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
1577 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00001578#endif
blueswir1db7b5422007-05-26 17:36:03 +00001579 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02001580 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00001581 }
1582
1583 return 0;
1584}
1585
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001586static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00001587{
Anthony Liguoric227f092009-10-01 16:12:16 -05001588 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00001589
Anthony Liguori7267c092011-08-20 22:09:37 -05001590 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00001591
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001592 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00001593 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001594 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Avi Kivity70c68e42012-01-02 12:32:48 +02001595 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02001596 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00001597#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001598 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1599 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00001600#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02001601 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00001602
1603 return mmio;
1604}
1605
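/*
 * Illustrative sketch, not part of the original file: the flatview
 * rendering code uses these helpers roughly as below when two sections
 * share one page.  The base address and the section indices are made
 * up; real callers pass indices returned by phys_section_add().
 */
static G_GNUC_UNUSED void example_split_page(AddressSpace *as)
{
    subpage_t *sp = subpage_init(as, 0x10000);

    /* first half of the page -> section 5, second half -> section 6 */
    subpage_register(sp, 0, TARGET_PAGE_SIZE / 2 - 1, 5);
    subpage_register(sp, TARGET_PAGE_SIZE / 2, TARGET_PAGE_SIZE - 1, 6);
}
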
Avi Kivity5312bd82012-02-12 18:32:55 +02001606static uint16_t dummy_section(MemoryRegion *mr)
1607{
1608 MemoryRegionSection section = {
1609 .mr = mr,
1610 .offset_within_address_space = 0,
1611 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001612 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02001613 };
1614
1615 return phys_section_add(&section);
1616}
1617
Avi Kivitya8170e52012-10-23 12:30:10 +02001618MemoryRegion *iotlb_to_region(hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02001619{
Paolo Bonzini0475d942013-05-29 12:28:21 +02001620 return address_space_memory.dispatch->sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02001621}
1622
Avi Kivitye9179ce2009-06-14 11:38:52 +03001623static void io_mem_init(void)
1624{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001625 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
1626 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001627 "unassigned", UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001628 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001629 "notdirty", UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001630 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001631 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03001632}
1633
Avi Kivityac1970f2012-10-03 16:22:53 +02001634static void mem_begin(MemoryListener *listener)
1635{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001636 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini00752702013-05-29 12:13:54 +02001637 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1638
1639 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
1640 d->as = as;
1641 as->next_dispatch = d;
1642}
1643
1644static void mem_commit(MemoryListener *listener)
1645{
1646 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02001647 AddressSpaceDispatch *cur = as->dispatch;
1648 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02001649
Paolo Bonzini0475d942013-05-29 12:28:21 +02001650 next->nodes = next_map.nodes;
1651 next->sections = next_map.sections;
1652
1653 as->dispatch = next;
1654 g_free(cur);
Avi Kivityac1970f2012-10-03 16:22:53 +02001655}
1656
Avi Kivity50c1e142012-02-08 21:36:02 +02001657static void core_begin(MemoryListener *listener)
1658{
Liu Ping Fanb41aac42013-05-29 11:09:17 +02001659 uint16_t n;
1660
Paolo Bonzini60926662013-05-29 12:30:26 +02001661 prev_map = g_new(PhysPageMap, 1);
1662 *prev_map = next_map;
1663
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001664 memset(&next_map, 0, sizeof(next_map));
Liu Ping Fanb41aac42013-05-29 11:09:17 +02001665 n = dummy_section(&io_mem_unassigned);
1666 assert(n == PHYS_SECTION_UNASSIGNED);
1667 n = dummy_section(&io_mem_notdirty);
1668 assert(n == PHYS_SECTION_NOTDIRTY);
1669 n = dummy_section(&io_mem_rom);
1670 assert(n == PHYS_SECTION_ROM);
1671 n = dummy_section(&io_mem_watch);
1672 assert(n == PHYS_SECTION_WATCH);
Avi Kivity50c1e142012-02-08 21:36:02 +02001673}
1674
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001675/* This listener's commit runs after the other AddressSpaceDispatch listeners'.
1676 * All AddressSpaceDispatch instances have switched to the next map.
1677 */
1678static void core_commit(MemoryListener *listener)
1679{
Paolo Bonzini60926662013-05-29 12:30:26 +02001680 phys_sections_free(prev_map);
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001681}
1682
Avi Kivity1d711482012-10-02 18:54:45 +02001683static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02001684{
Andreas Färber182735e2013-05-29 22:29:20 +02001685 CPUState *cpu;
Avi Kivity117712c2012-02-12 21:23:17 +02001686
1687 /* since each CPU stores ram addresses in its TLB cache, we must
1688 reset the modified entries */
1689 /* XXX: slow ! */
Andreas Färberbdc44642013-06-24 23:50:24 +02001690 CPU_FOREACH(cpu) {
Andreas Färber182735e2013-05-29 22:29:20 +02001691 CPUArchState *env = cpu->env_ptr;
1692
Avi Kivity117712c2012-02-12 21:23:17 +02001693 tlb_flush(env, 1);
1694 }
Avi Kivity50c1e142012-02-08 21:36:02 +02001695}
1696
Avi Kivity93632742012-02-08 16:54:16 +02001697static void core_log_global_start(MemoryListener *listener)
1698{
1699 cpu_physical_memory_set_dirty_tracking(1);
1700}
1701
1702static void core_log_global_stop(MemoryListener *listener)
1703{
1704 cpu_physical_memory_set_dirty_tracking(0);
1705}
1706
Avi Kivity93632742012-02-08 16:54:16 +02001707static MemoryListener core_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02001708 .begin = core_begin,
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001709 .commit = core_commit,
Avi Kivity93632742012-02-08 16:54:16 +02001710 .log_global_start = core_log_global_start,
1711 .log_global_stop = core_log_global_stop,
Avi Kivityac1970f2012-10-03 16:22:53 +02001712 .priority = 1,
Avi Kivity93632742012-02-08 16:54:16 +02001713};
1714
Avi Kivity1d711482012-10-02 18:54:45 +02001715static MemoryListener tcg_memory_listener = {
1716 .commit = tcg_commit,
1717};
1718
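/*
 * Illustrative sketch, not part of the original file: any subsystem can
 * observe topology changes the same way the core and TCG listeners
 * above do.  begin/commit bracket one update; region_add runs once per
 * section of the new flattened view.  All names are made up.
 */
static void example_region_add(MemoryListener *listener,
                               MemoryRegionSection *section)
{
    /* e.g. mirror the new section into an IOMMU or dirty-log backend */
}

static MemoryListener example_listener = {
    .region_add = example_region_add,
    .priority = 10,
};

static G_GNUC_UNUSED void example_listen(void)
{
    memory_listener_register(&example_listener, &address_space_memory);
}
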
Avi Kivityac1970f2012-10-03 16:22:53 +02001719void address_space_init_dispatch(AddressSpace *as)
1720{
Paolo Bonzini00752702013-05-29 12:13:54 +02001721 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001722 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02001723 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02001724 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02001725 .region_add = mem_add,
1726 .region_nop = mem_add,
1727 .priority = 0,
1728 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001729 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02001730}
1731
Avi Kivity83f3c252012-10-07 12:59:55 +02001732void address_space_destroy_dispatch(AddressSpace *as)
1733{
1734 AddressSpaceDispatch *d = as->dispatch;
1735
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001736 memory_listener_unregister(&as->dispatch_listener);
Avi Kivity83f3c252012-10-07 12:59:55 +02001737 g_free(d);
1738 as->dispatch = NULL;
1739}
1740
Avi Kivity62152b82011-07-26 14:26:14 +03001741static void memory_map_init(void)
1742{
Anthony Liguori7267c092011-08-20 22:09:37 -05001743 system_memory = g_malloc(sizeof(*system_memory));
Michael S. Tsirkin818f86b2013-11-04 08:06:08 +02001744
1745 assert(TARGET_PHYS_ADDR_SPACE_BITS <= 64);
1746
1747 memory_region_init(system_memory, NULL, "system",
1748 TARGET_PHYS_ADDR_SPACE_BITS == 64 ?
1749 UINT64_MAX : (0x1ULL << TARGET_PHYS_ADDR_SPACE_BITS));
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00001750 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03001751
Anthony Liguori7267c092011-08-20 22:09:37 -05001752 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02001753 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
1754 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00001755 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity93632742012-02-08 16:54:16 +02001756
Avi Kivityf6790af2012-10-02 20:13:51 +02001757 memory_listener_register(&core_memory_listener, &address_space_memory);
liguang26416892013-09-04 14:37:33 +08001758 if (tcg_enabled()) {
1759 memory_listener_register(&tcg_memory_listener, &address_space_memory);
1760 }
Avi Kivity62152b82011-07-26 14:26:14 +03001761}
1762
1763MemoryRegion *get_system_memory(void)
1764{
1765 return system_memory;
1766}
1767
Avi Kivity309cb472011-08-08 16:09:03 +03001768MemoryRegion *get_system_io(void)
1769{
1770 return system_io;
1771}
1772
pbrooke2eef172008-06-08 01:09:01 +00001773#endif /* !defined(CONFIG_USER_ONLY) */
1774
bellard13eb76e2004-01-24 15:23:36 +00001775/* physical memory access (slow version, mainly for debug) */
1776#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02001777int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00001778 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00001779{
1780 int l, flags;
1781 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00001782 void * p;
bellard13eb76e2004-01-24 15:23:36 +00001783
1784 while (len > 0) {
1785 page = addr & TARGET_PAGE_MASK;
1786 l = (page + TARGET_PAGE_SIZE) - addr;
1787 if (l > len)
1788 l = len;
1789 flags = page_get_flags(page);
1790 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00001791 return -1;
bellard13eb76e2004-01-24 15:23:36 +00001792 if (is_write) {
1793 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00001794 return -1;
bellard579a97f2007-11-11 14:26:47 +00001795 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001796 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00001797 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001798 memcpy(p, buf, l);
1799 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00001800 } else {
1801 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00001802 return -1;
bellard579a97f2007-11-11 14:26:47 +00001803 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001804 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00001805 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001806 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00001807 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00001808 }
1809 len -= l;
1810 buf += l;
1811 addr += l;
1812 }
Paul Brooka68fe892010-03-01 00:08:59 +00001813 return 0;
bellard13eb76e2004-01-24 15:23:36 +00001814}
bellard8df1cd02005-01-28 22:37:22 +00001815
bellard13eb76e2004-01-24 15:23:36 +00001816#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001817
Avi Kivitya8170e52012-10-23 12:30:10 +02001818static void invalidate_and_set_dirty(hwaddr addr,
1819 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001820{
1821 if (!cpu_physical_memory_is_dirty(addr)) {
1822 /* invalidate code */
1823 tb_invalidate_phys_page_range(addr, addr + length, 0);
1824 /* set dirty bit */
1825 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1826 }
Anthony PERARDe2269392012-10-03 13:49:22 +00001827 xen_modified_memory(addr, length);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001828}
1829
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02001830static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
1831{
1832 if (memory_region_is_ram(mr)) {
1833 return !(is_write && mr->readonly);
1834 }
1835 if (memory_region_is_romd(mr)) {
1836 return !is_write;
1837 }
1838
1839 return false;
1840}
1841
Richard Henderson23326162013-07-08 14:55:59 -07001842static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02001843{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02001844 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07001845
1846 /* Regions are assumed to support 1-4 byte accesses unless
1847 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07001848 if (access_size_max == 0) {
1849 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02001850 }
Richard Henderson23326162013-07-08 14:55:59 -07001851
1852 /* Bound the maximum access by the alignment of the address. */
1853 if (!mr->ops->impl.unaligned) {
1854 unsigned align_size_max = addr & -addr;
1855 if (align_size_max != 0 && align_size_max < access_size_max) {
1856 access_size_max = align_size_max;
1857 }
1858 }
1859
1860 /* Don't attempt accesses larger than the maximum. */
1861 if (l > access_size_max) {
1862 l = access_size_max;
1863 }
Paolo Bonzini098178f2013-07-29 14:27:39 +02001864 if (l & (l - 1)) {
1865 l = 1 << (qemu_fls(l) - 1);
1866 }
Richard Henderson23326162013-07-08 14:55:59 -07001867
1868 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02001869}
1870
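/*
 * Worked example, added for illustration of memory_access_size() above:
 * for a region with valid.max_access_size == 0 (so the default of 4
 * applies) and no unaligned support, an 8-byte access at addr 0x1002 is
 * capped to 4 by the default, then to addr & -addr == 2 by the
 * alignment bound, so the caller ends up issuing 2-byte accesses.
 */
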
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001871bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02001872 int len, bool is_write)
bellard13eb76e2004-01-24 15:23:36 +00001873{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001874 hwaddr l;
bellard13eb76e2004-01-24 15:23:36 +00001875 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001876 uint64_t val;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001877 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001878 MemoryRegion *mr;
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001879 bool error = false;
ths3b46e622007-09-17 08:09:54 +00001880
bellard13eb76e2004-01-24 15:23:36 +00001881 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001882 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001883 mr = address_space_translate(as, addr, &addr1, &l, is_write);
ths3b46e622007-09-17 08:09:54 +00001884
bellard13eb76e2004-01-24 15:23:36 +00001885 if (is_write) {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001886 if (!memory_access_is_direct(mr, is_write)) {
1887 l = memory_access_size(mr, l, addr1);
Andreas Färber4917cf42013-05-27 05:17:50 +02001888 /* XXX: could force current_cpu to NULL to avoid
bellard6a00d602005-11-21 23:25:50 +00001889 potential bugs */
Richard Henderson23326162013-07-08 14:55:59 -07001890 switch (l) {
1891 case 8:
1892 /* 64 bit write access */
1893 val = ldq_p(buf);
1894 error |= io_mem_write(mr, addr1, val, 8);
1895 break;
1896 case 4:
bellard1c213d12005-09-03 10:49:04 +00001897 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001898 val = ldl_p(buf);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001899 error |= io_mem_write(mr, addr1, val, 4);
Richard Henderson23326162013-07-08 14:55:59 -07001900 break;
1901 case 2:
bellard1c213d12005-09-03 10:49:04 +00001902 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001903 val = lduw_p(buf);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001904 error |= io_mem_write(mr, addr1, val, 2);
Richard Henderson23326162013-07-08 14:55:59 -07001905 break;
1906 case 1:
bellard1c213d12005-09-03 10:49:04 +00001907 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001908 val = ldub_p(buf);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001909 error |= io_mem_write(mr, addr1, val, 1);
Richard Henderson23326162013-07-08 14:55:59 -07001910 break;
1911 default:
1912 abort();
bellard13eb76e2004-01-24 15:23:36 +00001913 }
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02001914 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001915 addr1 += memory_region_get_ram_addr(mr);
bellard13eb76e2004-01-24 15:23:36 +00001916 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00001917 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00001918 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001919 invalidate_and_set_dirty(addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00001920 }
1921 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001922 if (!memory_access_is_direct(mr, is_write)) {
bellard13eb76e2004-01-24 15:23:36 +00001923 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001924 l = memory_access_size(mr, l, addr1);
Richard Henderson23326162013-07-08 14:55:59 -07001925 switch (l) {
1926 case 8:
1927 /* 64 bit read access */
1928 error |= io_mem_read(mr, addr1, &val, 8);
1929 stq_p(buf, val);
1930 break;
1931 case 4:
bellard13eb76e2004-01-24 15:23:36 +00001932 /* 32 bit read access */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001933 error |= io_mem_read(mr, addr1, &val, 4);
bellardc27004e2005-01-03 23:35:10 +00001934 stl_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07001935 break;
1936 case 2:
bellard13eb76e2004-01-24 15:23:36 +00001937 /* 16 bit read access */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001938 error |= io_mem_read(mr, addr1, &val, 2);
bellardc27004e2005-01-03 23:35:10 +00001939 stw_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07001940 break;
1941 case 1:
bellard1c213d12005-09-03 10:49:04 +00001942 /* 8 bit read access */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001943 error |= io_mem_read(mr, addr1, &val, 1);
bellardc27004e2005-01-03 23:35:10 +00001944 stb_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07001945 break;
1946 default:
1947 abort();
bellard13eb76e2004-01-24 15:23:36 +00001948 }
1949 } else {
1950 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001951 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
Avi Kivityf3705d52012-03-08 16:16:34 +02001952 memcpy(buf, ptr, l);
bellard13eb76e2004-01-24 15:23:36 +00001953 }
1954 }
1955 len -= l;
1956 buf += l;
1957 addr += l;
1958 }
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001959
1960 return error;
bellard13eb76e2004-01-24 15:23:36 +00001961}
bellard8df1cd02005-01-28 22:37:22 +00001962
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001963bool address_space_write(AddressSpace *as, hwaddr addr,
Avi Kivityac1970f2012-10-03 16:22:53 +02001964 const uint8_t *buf, int len)
1965{
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001966 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
Avi Kivityac1970f2012-10-03 16:22:53 +02001967}
1968
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001969bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02001970{
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001971 return address_space_rw(as, addr, buf, len, false);
Avi Kivityac1970f2012-10-03 16:22:53 +02001972}
1973
1974
Avi Kivitya8170e52012-10-23 12:30:10 +02001975void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02001976 int len, int is_write)
1977{
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001978 address_space_rw(&address_space_memory, addr, buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02001979}
1980
bellardd0ecd2a2006-04-23 17:14:48 +00001981/* used for ROM loading : can write in RAM and ROM */
Avi Kivitya8170e52012-10-23 12:30:10 +02001982void cpu_physical_memory_write_rom(hwaddr addr,
bellardd0ecd2a2006-04-23 17:14:48 +00001983 const uint8_t *buf, int len)
1984{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001985 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00001986 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001987 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001988 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00001989
bellardd0ecd2a2006-04-23 17:14:48 +00001990 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001991 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001992 mr = address_space_translate(&address_space_memory,
1993 addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00001994
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001995 if (!(memory_region_is_ram(mr) ||
1996 memory_region_is_romd(mr))) {
bellardd0ecd2a2006-04-23 17:14:48 +00001997 /* do nothing */
1998 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001999 addr1 += memory_region_get_ram_addr(mr);
bellardd0ecd2a2006-04-23 17:14:48 +00002000 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002001 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002002 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002003 invalidate_and_set_dirty(addr1, l);
bellardd0ecd2a2006-04-23 17:14:48 +00002004 }
2005 len -= l;
2006 buf += l;
2007 addr += l;
2008 }
2009}
2010
aliguori6d16c2f2009-01-22 16:59:11 +00002011typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002012 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002013 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002014 hwaddr addr;
2015 hwaddr len;
aliguori6d16c2f2009-01-22 16:59:11 +00002016} BounceBuffer;
2017
2018static BounceBuffer bounce;
2019
aliguoriba223c22009-01-22 16:59:16 +00002020typedef struct MapClient {
2021 void *opaque;
2022 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00002023 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002024} MapClient;
2025
Blue Swirl72cf2d42009-09-12 07:36:22 +00002026static QLIST_HEAD(map_client_list, MapClient) map_client_list
2027 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002028
2029void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2030{
Anthony Liguori7267c092011-08-20 22:09:37 -05002031 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00002032
2033 client->opaque = opaque;
2034 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002035 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00002036 return client;
2037}
2038
Blue Swirl8b9c99d2012-10-28 11:04:51 +00002039static void cpu_unregister_map_client(void *_client)
aliguoriba223c22009-01-22 16:59:16 +00002040{
2041 MapClient *client = (MapClient *)_client;
2042
Blue Swirl72cf2d42009-09-12 07:36:22 +00002043 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002044 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002045}
2046
2047static void cpu_notify_map_clients(void)
2048{
2049 MapClient *client;
2050
Blue Swirl72cf2d42009-09-12 07:36:22 +00002051 while (!QLIST_EMPTY(&map_client_list)) {
2052 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002053 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09002054 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00002055 }
2056}
2057
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002058bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2059{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002060 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002061 hwaddr l, xlat;
2062
2063 while (len > 0) {
2064 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002065 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2066 if (!memory_access_is_direct(mr, is_write)) {
2067 l = memory_access_size(mr, l, addr);
2068 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002069 return false;
2070 }
2071 }
2072
2073 len -= l;
2074 addr += l;
2075 }
2076 return true;
2077}
2078
aliguori6d16c2f2009-01-22 16:59:11 +00002079/* Map a physical memory region into a host virtual address.
2080 * May map a subset of the requested range, given by and returned in *plen.
2081 * May return NULL if resources needed to perform the mapping are exhausted.
2082 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002083 * Use cpu_register_map_client() to know when retrying the map operation is
2084 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002085 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002086void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002087 hwaddr addr,
2088 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002089 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002090{
Avi Kivitya8170e52012-10-23 12:30:10 +02002091 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002092 hwaddr done = 0;
2093 hwaddr l, xlat, base;
2094 MemoryRegion *mr, *this_mr;
2095 ram_addr_t raddr;
aliguori6d16c2f2009-01-22 16:59:11 +00002096
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002097 if (len == 0) {
2098 return NULL;
2099 }
aliguori6d16c2f2009-01-22 16:59:11 +00002100
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002101 l = len;
2102 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2103 if (!memory_access_is_direct(mr, is_write)) {
2104 if (bounce.buffer) {
2105 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002106 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002107 /* Avoid unbounded allocations */
2108 l = MIN(l, TARGET_PAGE_SIZE);
2109 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002110 bounce.addr = addr;
2111 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002112
2113 memory_region_ref(mr);
2114 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002115 if (!is_write) {
2116 address_space_read(as, addr, bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002117 }
aliguori6d16c2f2009-01-22 16:59:11 +00002118
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002119 *plen = l;
2120 return bounce.buffer;
2121 }
2122
2123 base = xlat;
2124 raddr = memory_region_get_ram_addr(mr);
2125
2126 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00002127 len -= l;
2128 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002129 done += l;
2130 if (len == 0) {
2131 break;
2132 }
2133
2134 l = len;
2135 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2136 if (this_mr != mr || xlat != base + done) {
2137 break;
2138 }
aliguori6d16c2f2009-01-22 16:59:11 +00002139 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002140
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002141 memory_region_ref(mr);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002142 *plen = done;
2143 return qemu_ram_ptr_length(raddr + base, plen);
aliguori6d16c2f2009-01-22 16:59:11 +00002144}
2145
Avi Kivityac1970f2012-10-03 16:22:53 +02002146/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002147 * Will also mark the memory as dirty if is_write == 1. access_len gives
2148 * the amount of memory that was actually read or written by the caller.
2149 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002150void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2151 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002152{
2153 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002154 MemoryRegion *mr;
2155 ram_addr_t addr1;
2156
2157 mr = qemu_ram_addr_from_host(buffer, &addr1);
2158 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00002159 if (is_write) {
aliguori6d16c2f2009-01-22 16:59:11 +00002160 while (access_len) {
2161 unsigned l;
2162 l = TARGET_PAGE_SIZE;
2163 if (l > access_len)
2164 l = access_len;
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002165 invalidate_and_set_dirty(addr1, l);
aliguori6d16c2f2009-01-22 16:59:11 +00002166 addr1 += l;
2167 access_len -= l;
2168 }
2169 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002170 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002171 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002172 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002173 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00002174 return;
2175 }
2176 if (is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002177 address_space_write(as, bounce.addr, bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002178 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002179 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002180 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002181 memory_region_unref(bounce.mr);
aliguoriba223c22009-01-22 16:59:16 +00002182 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002183}
bellardd0ecd2a2006-04-23 17:14:48 +00002184
Avi Kivitya8170e52012-10-23 12:30:10 +02002185void *cpu_physical_memory_map(hwaddr addr,
2186 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002187 int is_write)
2188{
2189 return address_space_map(&address_space_memory, addr, plen, is_write);
2190}
2191
Avi Kivitya8170e52012-10-23 12:30:10 +02002192void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2193 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002194{
2195 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2196}
2197
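/*
 * Illustrative sketch, not part of the original file: the usual
 * zero-copy pattern for the map API above.  When the single bounce
 * buffer is busy, map returns NULL; a real caller would register a map
 * client with cpu_register_map_client() and retry from its callback,
 * while this sketch just falls back to the copying interface.  For
 * simplicity it also ignores that *plen may come back smaller than
 * requested; a full implementation would loop.
 */
static G_GNUC_UNUSED void example_read_mapped(hwaddr gpa, uint8_t *dest,
                                              hwaddr len)
{
    hwaddr plen = len;
    void *p = cpu_physical_memory_map(gpa, &plen, 0);

    if (p) {
        memcpy(dest, p, plen);
        cpu_physical_memory_unmap(p, plen, 0, plen);
    } else {
        cpu_physical_memory_rw(gpa, dest, len, 0);
    }
}
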
bellard8df1cd02005-01-28 22:37:22 +00002198/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002199static inline uint32_t ldl_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002200 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002201{
bellard8df1cd02005-01-28 22:37:22 +00002202 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002203 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002204 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002205 hwaddr l = 4;
2206 hwaddr addr1;
bellard8df1cd02005-01-28 22:37:22 +00002207
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002208 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2209 false);
2210 if (l < 4 || !memory_access_is_direct(mr, false)) {
bellard8df1cd02005-01-28 22:37:22 +00002211 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002212 io_mem_read(mr, addr1, &val, 4);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002213#if defined(TARGET_WORDS_BIGENDIAN)
2214 if (endian == DEVICE_LITTLE_ENDIAN) {
2215 val = bswap32(val);
2216 }
2217#else
2218 if (endian == DEVICE_BIG_ENDIAN) {
2219 val = bswap32(val);
2220 }
2221#endif
bellard8df1cd02005-01-28 22:37:22 +00002222 } else {
2223 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002224 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002225 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002226 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002227 switch (endian) {
2228 case DEVICE_LITTLE_ENDIAN:
2229 val = ldl_le_p(ptr);
2230 break;
2231 case DEVICE_BIG_ENDIAN:
2232 val = ldl_be_p(ptr);
2233 break;
2234 default:
2235 val = ldl_p(ptr);
2236 break;
2237 }
bellard8df1cd02005-01-28 22:37:22 +00002238 }
2239 return val;
2240}
2241
Avi Kivitya8170e52012-10-23 12:30:10 +02002242uint32_t ldl_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002243{
2244 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2245}
2246
Avi Kivitya8170e52012-10-23 12:30:10 +02002247uint32_t ldl_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002248{
2249 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2250}
2251
Avi Kivitya8170e52012-10-23 12:30:10 +02002252uint32_t ldl_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002253{
2254 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2255}
2256
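/*
 * Illustrative sketch, not part of the original file: the _le/_be
 * variants above let device code state the byte order of in-memory
 * structures once, independent of TARGET_WORDS_BIGENDIAN.  The
 * descriptor layout is made up.
 */
static G_GNUC_UNUSED uint32_t example_read_desc_len(hwaddr desc_base)
{
    /* the made-up descriptor stores its length little-endian at +4 */
    return ldl_le_phys(desc_base + 4);
}
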
bellard84b7b8e2005-11-28 21:19:04 +00002257/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002258static inline uint64_t ldq_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002259 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00002260{
bellard84b7b8e2005-11-28 21:19:04 +00002261 uint8_t *ptr;
2262 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002263 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002264 hwaddr l = 8;
2265 hwaddr addr1;
bellard84b7b8e2005-11-28 21:19:04 +00002266
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002267 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2268 false);
2269 if (l < 8 || !memory_access_is_direct(mr, false)) {
bellard84b7b8e2005-11-28 21:19:04 +00002270 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002271 io_mem_read(mr, addr1, &val, 8);
Paolo Bonzini968a5622013-05-24 17:58:37 +02002272#if defined(TARGET_WORDS_BIGENDIAN)
2273 if (endian == DEVICE_LITTLE_ENDIAN) {
2274 val = bswap64(val);
2275 }
2276#else
2277 if (endian == DEVICE_BIG_ENDIAN) {
2278 val = bswap64(val);
2279 }
2280#endif
bellard84b7b8e2005-11-28 21:19:04 +00002281 } else {
2282 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002283 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002284 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002285 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002286 switch (endian) {
2287 case DEVICE_LITTLE_ENDIAN:
2288 val = ldq_le_p(ptr);
2289 break;
2290 case DEVICE_BIG_ENDIAN:
2291 val = ldq_be_p(ptr);
2292 break;
2293 default:
2294 val = ldq_p(ptr);
2295 break;
2296 }
bellard84b7b8e2005-11-28 21:19:04 +00002297 }
2298 return val;
2299}
2300
Avi Kivitya8170e52012-10-23 12:30:10 +02002301uint64_t ldq_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002302{
2303 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2304}
2305
Avi Kivitya8170e52012-10-23 12:30:10 +02002306uint64_t ldq_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002307{
2308 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2309}
2310
Avi Kivitya8170e52012-10-23 12:30:10 +02002311uint64_t ldq_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002312{
2313 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2314}

/* XXX: optimize */
uint32_t ldub_phys(hwaddr addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        io_mem_write(mr, addr1, val, 4);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
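
/*
 * Illustrative sketch, not part of the original file: a target MMU
 * emulation can use stl_phys_notdirty() to set accessed/dirty bits in
 * a 32-bit guest PTE in place. A plain stl_phys() would flag the page
 * dirty and invalidate any translated code on it, which is wasteful
 * for pure page-table bookkeeping. The PTE flag values are made up.
 */
#if 0
#define EXAMPLE_PTE_ACCESSED 0x20
#define EXAMPLE_PTE_DIRTY    0x40

static void example_update_pte(hwaddr pte_addr, bool is_write)
{
    uint32_t pte = ldl_phys(pte_addr);

    pte |= EXAMPLE_PTE_ACCESSED;
    if (is_write) {
        pte |= EXAMPLE_PTE_DIRTY;
    }
    stl_phys_notdirty(pte_addr, pte);
}
#endif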

/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(mr, addr1, val, 4);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
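
/*
 * Illustrative sketch, not part of the original file: firmware and
 * table generators that lay out guest-visible structures in a fixed
 * byte order use the explicit-endian stores, so the bytes in RAM are
 * identical whatever the host or target endianness. Names are made up.
 */
#if 0
static void example_write_le_table(hwaddr base, uint32_t sig, uint32_t len)
{
    stl_le_phys(base, sig);          /* bytes 0..3: signature, LE */
    stl_le_phys(base + 4, len);      /* bytes 4..7: length, LE */
}
#endif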

/* XXX: optimize */
void stb_phys(hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(mr, addr1, val, 2);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(cpu, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
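
/*
 * Illustrative sketch, not part of the original file: a debugger stub
 * reads guest *virtual* memory through cpu_memory_rw_debug(), which
 * translates page by page via cpu_get_phys_page_debug() and can also
 * patch ROM for software breakpoints. The helper name is hypothetical.
 */
#if 0
static bool example_read_guest_u32(CPUState *cpu, target_ulong vaddr,
                                   uint32_t *out)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(cpu, vaddr, buf, sizeof(buf), 0) < 0) {
        return false;    /* some page in the range is unmapped */
    }
    memcpy(out, buf, sizeof(buf));
    return true;
}
#endif
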
#endif

#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif
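
/*
 * Illustrative sketch, not part of the original file: a virtio device
 * model could use the helper above to pick the matching fixed-endian
 * accessor for a guest-endian field. The wrapper name is hypothetical.
 */
#if 0
static uint16_t example_load_virtio_u16(hwaddr pa)
{
    return virtio_is_big_endian() ? lduw_be_phys(pa) : lduw_le_phys(pa);
}
#endif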

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;

    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    return !(memory_region_is_ram(mr) ||
             memory_region_is_romd(mr));
}
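
/*
 * Illustrative sketch, not part of the original file: a guest memory
 * dumper can use cpu_physical_memory_is_io() to avoid reading device
 * regions, where a load may have side effects, and only copy out real
 * RAM/ROM contents. The helper name is hypothetical.
 */
#if 0
static void example_dump_page(hwaddr pa, uint8_t *buf)
{
    if (cpu_physical_memory_is_io(pa)) {
        memset(buf, 0, TARGET_PAGE_SIZE);    /* leave MMIO untouched */
    } else {
        cpu_physical_memory_read(pa, buf, TARGET_PAGE_SIZE);
    }
}
#endif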

void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        func(block->host, block->offset, block->length, opaque);
    }
}
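
/*
 * Illustrative sketch, not part of the original file: the iterator
 * above hands each RAM block's host pointer, ram_addr_t offset and
 * length to the callback (assumed here to return void, matching the
 * call in qemu_ram_foreach_block()). This made-up user just sums the
 * block sizes.
 */
#if 0
static void example_count_block(void *host_addr, ram_addr_t offset,
                                ram_addr_t length, void *opaque)
{
    uint64_t *total = opaque;

    *total += length;
}

static uint64_t example_total_ram_size(void)
{
    uint64_t total = 0;

    qemu_ram_foreach_block(example_count_block, &total);
    return total;
}
#endif
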
#endif