/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS

#define P_L2_BITS 10
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    Node *nodes;
    MemoryRegionSection *sections;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

static PhysPageMap *prev_map;
static PhysPageMap next_map;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

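/* Grow next_map.nodes so that at least @nodes more radix tree nodes fit,
 * doubling the allocation (with a floor of 16) as needed.
 */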
static void phys_map_node_reserve(unsigned nodes)
{
    if (next_map.nodes_nb + nodes > next_map.nodes_nb_alloc) {
        next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc * 2,
                                      16);
        next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc,
                                      next_map.nodes_nb + nodes);
        next_map.nodes = g_renew(Node, next_map.nodes,
                                 next_map.nodes_nb_alloc);
    }
}

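/* Allocate one node from next_map, initializing every entry as a
 * non-leaf pointing to PHYS_MAP_NODE_NIL; returns the node's index.
 */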
static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = next_map.nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != next_map.nodes_nb_alloc);
    for (i = 0; i < P_L2_SIZE; ++i) {
        next_map.nodes[ret][i].is_leaf = 0;
        next_map.nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

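/* Recursively populate the radix tree: map *nb pages starting at page
 * *index to section number @leaf.  Ranges that are aligned to and at
 * least as large as this level's step become leaves directly; smaller
 * pieces descend one level (newly allocated level-0 nodes start out as
 * fully unassigned leaves).
 */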
static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = next_map.nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < P_L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = next_map.nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

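/* Map @nb physical pages starting at page @index to section number
 * @leaf in @d's dispatch tree; e.g. register_subpage() below uses
 * phys_page_set(d, base >> TARGET_PAGE_BITS, 1, ...) for a single page.
 */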
static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

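/* Walk the tree from @lp down to a leaf and return the section for page
 * @index; a PHYS_MAP_NODE_NIL link on the way resolves to the shared
 * "unassigned" section.
 */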
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr index,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    int i;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }
    return &sections[lp.ptr];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

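/* Return the section covering @addr, descending into the per-page
 * subpage table when the page is only partially mapped and
 * @resolve_subpage is set.
 */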
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr >> TARGET_PAGE_BITS,
                             d->nodes, d->sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}

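/* Translate @addr within @as into a MemoryRegion and an offset (*xlat)
 * inside it, walking through any IOMMUs on the way.  *plen is clamped
 * to the bytes that can be accessed contiguously; on an IOMMU
 * permission fault the unassigned region is returned instead.
 */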
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    *plen = len;
    *xlat = addr;
    return mr;
}

MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)

{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
    }

    breakpoint_invalidate(ENV_GET_CPU(env), pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(ENV_GET_CPU(env), breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
#endif
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    CPUState *cpu = ENV_GET_CPU(env);
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
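/* Return the RAMBlock that contains ram_addr_t @addr, consulting the
 * most-recently-used cache first; aborts if the offset is not covered
 * by any block.
 */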
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here. */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    RAMBlock *block;
    ram_addr_t start1;

    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)block->host + (start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

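/* Build the iotlb entry for a softmmu TLB fill: a tagged ram_addr_t for
 * RAM (with PHYS_SECTION_NOTDIRTY or PHYS_SECTION_ROM in the low bits),
 * or a section number (plus page offset) for I/O.  Pages with a
 * matching watchpoint are instead routed to the watchpoint section and
 * marked TLB_MMIO.
 */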
hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - address_space_memory.dispatch->sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t))
{
    phys_mem_alloc = alloc;
}

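/* Append a copy of @section to next_map, taking a reference on its
 * MemoryRegion, and return the new section number.
 */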
static uint16_t phys_section_add(MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(next_map.sections_nb < TARGET_PAGE_SIZE);

    if (next_map.sections_nb == next_map.sections_nb_alloc) {
        next_map.sections_nb_alloc = MAX(next_map.sections_nb_alloc * 2,
                                         16);
        next_map.sections = g_renew(MemoryRegionSection, next_map.sections,
                                    next_map.sections_nb_alloc);
    }
    next_map.sections[next_map.sections_nb] = *section;
    memory_region_ref(section->mr);
    return next_map.sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
    g_free(map);
}

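/* Register a section that does not cover a whole target page: create
 * (or reuse) the subpage container for its page, then hook the section
 * into the covered range of per-byte subpage slots.
 */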
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
                  & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base >> TARGET_PAGE_BITS,
                                                   next_map.nodes, next_map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}


static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

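/* MemoryListener add callback: split the incoming section into an
 * unaligned head, a page-aligned middle and an unaligned tail,
 * registering subpages for the partial pages and a multipage mapping
 * for the rest.
 */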
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

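/* Return the block size of the filesystem backing @path (the huge page
 * size on hugetlbfs); warns if @path is not on hugetlbfs and returns 0
 * if statfs() fails.
 */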
#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

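/* SIGBUS is raised when touching a huge page fails because the pool is
 * exhausted; file_ram_alloc() longjmps out of its preallocation loop
 * through here to report the error.
 */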
static sigjmp_buf sigjump;

static void sigbus_handler(int signal)
{
    siglongjmp(sigjump, 1);
}

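/* Back a RAM block with an unlinked temporary file on the hugetlbfs
 * mount at @path: round the size up to a huge page, mmap the file
 * MAP_PRIVATE, and touch every page up front when -mem-prealloc is in
 * effect.  Returns NULL if the file cannot be set up (preallocation
 * failures are fatal), so the caller can fall back to anonymous memory.
 */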
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }

    if (mem_prealloc) {
        int ret, i;
        struct sigaction act, oldact;
        sigset_t set, oldset;

        memset(&act, 0, sizeof(act));
        act.sa_handler = &sigbus_handler;
        act.sa_flags = 0;

        ret = sigaction(SIGBUS, &act, &oldact);
        if (ret) {
            perror("file_ram_alloc: failed to install signal handler");
            exit(1);
        }

        /* unblock SIGBUS */
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        pthread_sigmask(SIG_UNBLOCK, &set, &oldset);

        if (sigsetjmp(sigjump, 1)) {
            fprintf(stderr, "file_ram_alloc: failed to preallocate pages\n");
            exit(1);
        }

        /* MAP_POPULATE silently ignores failures */
        for (i = 0; i < (memory/hpagesize)-1; i++) {
            memset(area + (hpagesize*i), 0, 1);
        }

        ret = sigaction(SIGBUS, &oldact, NULL);
        if (ret) {
            perror("file_ram_alloc: failed to reinstall signal handler");
            exit(1);
        }

        pthread_sigmask(SIG_SETMASK, &oldset, NULL);
    }

    block->fd = fd;
    return area;
}
#else
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    fprintf(stderr, "-mem-path not supported on this host\n");
    exit(1);
}
#endif

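/* Best-fit search over the gaps between registered RAM blocks: return
 * the start of the smallest gap that can hold @size bytes, aborting if
 * none exists.
 */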
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!qemu_opt_get_bool(qemu_get_machine_opts(),
                           "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
        }
    }
}

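/* Give the block at @addr its migration idstr, "<device path>/<name>";
 * registering two blocks with the same idstr is a fatal error.
 */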
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too. */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

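/* Create and register a new RAM block of @size bytes for @mr.  The
 * backing memory is, in order of preference: the caller-supplied @host
 * pointer, Xen's allocator, a hugetlbfs file when -mem-path is given,
 * or phys_mem_alloc().  The dirty bitmap is extended and the new range
 * marked dirty; returns the block's ram_addr_t offset.
 */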
Avi Kivityc5705a72011-12-20 15:59:12 +02001144ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1145 MemoryRegion *mr)
1146{
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001147 RAMBlock *block, *new_block;
Avi Kivityc5705a72011-12-20 15:59:12 +02001148
1149 size = TARGET_PAGE_ALIGN(size);
1150 new_block = g_malloc0(sizeof(*new_block));
Markus Armbruster3435f392013-07-31 15:11:07 +02001151 new_block->fd = -1;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001152
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001153 /* This assumes the iothread lock is taken here too. */
1154 qemu_mutex_lock_ramlist();
Avi Kivity7c637362011-12-21 13:09:49 +02001155 new_block->mr = mr;
Jun Nakajima432d2682010-08-31 16:41:25 +01001156 new_block->offset = find_ram_offset(size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001157 if (host) {
1158 new_block->host = host;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001159 new_block->flags |= RAM_PREALLOC_MASK;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001160 } else if (xen_enabled()) {
1161 if (mem_path) {
1162 fprintf(stderr, "-mem-path not supported with Xen\n");
1163 exit(1);
1164 }
1165 xen_ram_alloc(new_block->offset, size, mr);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001166 } else {
1167 if (mem_path) {
Markus Armbrustere1e84ba2013-07-31 15:11:10 +02001168 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1169 /*
1170 * file_ram_alloc() needs to allocate just like
1171 * phys_mem_alloc, but we haven't bothered to provide
1172 * a hook there.
1173 */
1174 fprintf(stderr,
1175 "-mem-path not supported with this accelerator\n");
1176 exit(1);
1177 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001178 new_block->host = file_ram_alloc(new_block, size, mem_path);
Markus Armbruster0628c182013-07-31 15:11:06 +02001179 }
1180 if (!new_block->host) {
Markus Armbruster91138032013-07-31 15:11:08 +02001181 new_block->host = phys_mem_alloc(size);
Markus Armbruster39228252013-07-31 15:11:11 +02001182 if (!new_block->host) {
1183 fprintf(stderr, "Cannot set up guest memory '%s': %s\n",
1184 new_block->mr->name, strerror(errno));
1185 exit(1);
1186 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001187 memory_try_enable_merging(new_block->host, size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001188 }
1189 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001190 new_block->length = size;
1191
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001192 /* Keep the list sorted from biggest to smallest block. */
1193 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1194 if (block->length < new_block->length) {
1195 break;
1196 }
1197 }
1198 if (block) {
1199 QTAILQ_INSERT_BEFORE(block, new_block, next);
1200 } else {
1201 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1202 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001203 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001204
Umesh Deshpandef798b072011-08-18 11:41:17 -07001205 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001206 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001207
Anthony Liguori7267c092011-08-20 22:09:37 -05001208 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
Cam Macdonell84b89d72010-07-26 18:10:57 -06001209 last_ram_offset() >> TARGET_PAGE_BITS);
Igor Mitsyanko5fda0432012-08-10 18:45:11 +04001210 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
1211 0, size >> TARGET_PAGE_BITS);
Juan Quintela1720aee2012-06-22 13:14:17 +02001212 cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001213
Jason Baronddb97f12012-08-02 15:44:16 -04001214 qemu_ram_setup_dump(new_block->host, size);
Luiz Capitulinoad0b5322012-10-05 16:47:57 -03001215 qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
Andrea Arcangeli3e469db2013-07-25 12:11:15 +02001216 qemu_madvise(new_block->host, size, QEMU_MADV_DONTFORK);
Jason Baronddb97f12012-08-02 15:44:16 -04001217
Cam Macdonell84b89d72010-07-26 18:10:57 -06001218 if (kvm_enabled())
1219 kvm_setup_guest_memory(new_block->host, size);
1220
1221 return new_block->offset;
1222}
1223
Avi Kivityc5705a72011-12-20 15:59:12 +02001224ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
pbrook94a6b542009-04-11 17:15:54 +00001225{
Avi Kivityc5705a72011-12-20 15:59:12 +02001226 return qemu_ram_alloc_from_ptr(size, NULL, mr);
pbrook94a6b542009-04-11 17:15:54 +00001227}
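
/*
 * Usage sketch (hypothetical caller): device models normally reach this
 * allocator indirectly through the MemoryRegion API rather than calling
 * it directly, e.g.:
 *
 *     memory_region_init_ram(&s->ram, OBJECT(dev), "mydev.ram", size);
 *
 * which ends up in qemu_ram_alloc() and records the returned ram_addr_t
 * offset in the region.
 */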

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too. */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    /* This assumes the iothread lock is taken here too. */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QTAILQ_REMOVE(&ram_list.blocks, block, next);
            ram_list.mru_block = NULL;
            ram_list.version++;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (xen_enabled()) {
                xen_invalidate_map_cache_entry(block->host);
#ifndef _WIN32
            } else if (block->fd >= 0) {
                munmap(block->host, block->length);
                close(block->fd);
#endif
            } else {
                qemu_anon_ram_free(block->host, block->length);
            }
            g_free(block);
            break;
        }
    }
    qemu_mutex_unlock_ramlist();
}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (xen_enabled()) {
                abort();
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (block->fd >= 0) {
#ifdef MAP_POPULATE
                    flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                        MAP_PRIVATE;
#else
                    flags |= MAP_PRIVATE;
#endif
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                } else {
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */
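
/*
 * Background note (summarized, not original commentary): qemu_ram_remap()
 * is intended for hardware memory error recovery, where a poisoned page
 * must be discarded and replaced by a fresh mapping at the same virtual
 * address before the guest resumes.  The MAP_FIXED remap above therefore
 * has to reproduce the exact allocation strategy used at RAM creation.
 */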

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block = qemu_get_ram_block(addr);

    if (xen_enabled()) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        } else if (block->host == NULL) {
            block->host =
                xen_map_cache(block->offset, block->length, 1);
        }
    }
    return block->host + (addr - block->offset);
}

/* Return a host pointer to guest's ram.  Similar to qemu_get_ram_ptr
 * but takes a size argument. */
static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length) {
                    *size = block->length - addr + block->offset;
                }
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return qemu_get_ram_block(*ram_addr)->mr;
    }

    block = ram_list.mru_block;
    if (block && block->host && host - block->host < block->length) {
        goto found;
    }

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            goto found;
        }
    }

    return NULL;

found:
    *ram_addr = block->offset + (host - block->host);
    return block->mr;
}
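
/*
 * Example (hypothetical caller): translating a host pointer back to its
 * RAM offset is on the hot path of the softmmu code, hence the MRU-block
 * shortcut above before falling back to the full list walk:
 *
 *     ram_addr_t offset;
 *     MemoryRegion *mr = qemu_ram_addr_from_host(host_ptr, &offset);
 *     if (mr) {
 *         // host_ptr points into guest RAM at ram address 'offset'
 *     }
 */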

static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff) {
        CPUArchState *env = current_cpu->env_ptr;
        tlb_set_dirty(env, env->mem_io_vaddr);
    }
}

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
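
/*
 * Summary of the notdirty mechanism (restated from the code above): pages
 * whose dirty bits are clear are entered into the TLB with writes routed
 * to io_mem_notdirty.  The first write to such a page lands in
 * notdirty_mem_write(), which invalidates any translated code derived
 * from the page, performs the store, and sets the dirty flags so that
 * later writes can go straight to RAM.
 */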

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = current_cpu->env_ptr;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB.  Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb_check_watchpoint(env);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

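/*
 * Watchpointed pages work the same way: their TLB entries point at
 * io_mem_watch, so every load or store funnels through the handlers
 * above, giving check_watchpoint() a chance to raise EXCP_DEBUG before
 * the access is re-issued through the normal ld*_phys/st*_phys helpers.
 */
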
static uint64_t subpage_read(void *opaque, hwaddr addr,
                             unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    address_space_read(subpage->as, addr + subpage->base, buf, len);
    switch (len) {
    case 1:
        return ldub_p(buf);
    case 2:
        return lduw_p(buf);
    case 4:
        return ldl_p(buf);
    default:
        abort();
    }
}

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
{
    subpage_t *subpage = opaque;
    uint8_t buf[4];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    default:
        abort();
    }
    address_space_write(subpage->as, addr + subpage->base, buf, len);
}

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      len, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
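
/*
 * Subpages handle the case where one target page is shared by several
 * memory regions (e.g. two small MMIO areas inside the same 4K page).
 * The dispatch table then holds a single subpage section for that page,
 * and subpage_read()/subpage_write() above forward each access, rebased
 * by subpage->base, to whichever region claims that byte range within
 * subpage->as.
 */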

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) {
        return -1;
    }
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}

static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(&section);
}

MemoryRegion *iotlb_to_region(hwaddr index)
{
    return address_space_memory.dispatch->sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}

static void mem_begin(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
    d->as = as;
    as->next_dispatch = d;
}

static void mem_commit(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    next->nodes = next_map.nodes;
    next->sections = next_map.sections;

    as->dispatch = next;
    g_free(cur);
}
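
/*
 * Update protocol (restating what begin/commit do above): mem_begin()
 * builds a fresh AddressSpaceDispatch in as->next_dispatch while readers
 * continue to use as->dispatch; mem_commit() then publishes the new
 * table with a single pointer assignment and frees the old one, so each
 * topology update appears atomic to consumers of the dispatch table.
 */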
1701
Avi Kivity50c1e142012-02-08 21:36:02 +02001702static void core_begin(MemoryListener *listener)
1703{
Liu Ping Fanb41aac42013-05-29 11:09:17 +02001704 uint16_t n;
1705
Paolo Bonzini60926662013-05-29 12:30:26 +02001706 prev_map = g_new(PhysPageMap, 1);
1707 *prev_map = next_map;
1708
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001709 memset(&next_map, 0, sizeof(next_map));
Liu Ping Fanb41aac42013-05-29 11:09:17 +02001710 n = dummy_section(&io_mem_unassigned);
1711 assert(n == PHYS_SECTION_UNASSIGNED);
1712 n = dummy_section(&io_mem_notdirty);
1713 assert(n == PHYS_SECTION_NOTDIRTY);
1714 n = dummy_section(&io_mem_rom);
1715 assert(n == PHYS_SECTION_ROM);
1716 n = dummy_section(&io_mem_watch);
1717 assert(n == PHYS_SECTION_WATCH);
Avi Kivity50c1e142012-02-08 21:36:02 +02001718}
1719
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001720/* This listener's commit run after the other AddressSpaceDispatch listeners'.
1721 * All AddressSpaceDispatch instances have switched to the next map.
1722 */
1723static void core_commit(MemoryListener *listener)
1724{
Paolo Bonzini60926662013-05-29 12:30:26 +02001725 phys_sections_free(prev_map);
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001726}
1727
Avi Kivity1d711482012-10-02 18:54:45 +02001728static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02001729{
Andreas Färber182735e2013-05-29 22:29:20 +02001730 CPUState *cpu;
Avi Kivity117712c2012-02-12 21:23:17 +02001731
1732 /* since each CPU stores ram addresses in its TLB cache, we must
1733 reset the modified entries */
1734 /* XXX: slow ! */
Andreas Färberbdc44642013-06-24 23:50:24 +02001735 CPU_FOREACH(cpu) {
Andreas Färber182735e2013-05-29 22:29:20 +02001736 CPUArchState *env = cpu->env_ptr;
1737
Avi Kivity117712c2012-02-12 21:23:17 +02001738 tlb_flush(env, 1);
1739 }
Avi Kivity50c1e142012-02-08 21:36:02 +02001740}
1741
Avi Kivity93632742012-02-08 16:54:16 +02001742static void core_log_global_start(MemoryListener *listener)
1743{
1744 cpu_physical_memory_set_dirty_tracking(1);
1745}
1746
1747static void core_log_global_stop(MemoryListener *listener)
1748{
1749 cpu_physical_memory_set_dirty_tracking(0);
1750}
1751
Avi Kivity93632742012-02-08 16:54:16 +02001752static MemoryListener core_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02001753 .begin = core_begin,
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001754 .commit = core_commit,
Avi Kivity93632742012-02-08 16:54:16 +02001755 .log_global_start = core_log_global_start,
1756 .log_global_stop = core_log_global_stop,
Avi Kivityac1970f2012-10-03 16:22:53 +02001757 .priority = 1,
Avi Kivity93632742012-02-08 16:54:16 +02001758};
1759
Avi Kivity1d711482012-10-02 18:54:45 +02001760static MemoryListener tcg_memory_listener = {
1761 .commit = tcg_commit,
1762};
1763
Avi Kivityac1970f2012-10-03 16:22:53 +02001764void address_space_init_dispatch(AddressSpace *as)
1765{
Paolo Bonzini00752702013-05-29 12:13:54 +02001766 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001767 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02001768 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02001769 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02001770 .region_add = mem_add,
1771 .region_nop = mem_add,
1772 .priority = 0,
1773 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001774 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02001775}
1776
Avi Kivity83f3c252012-10-07 12:59:55 +02001777void address_space_destroy_dispatch(AddressSpace *as)
1778{
1779 AddressSpaceDispatch *d = as->dispatch;
1780
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001781 memory_listener_unregister(&as->dispatch_listener);
Avi Kivity83f3c252012-10-07 12:59:55 +02001782 g_free(d);
1783 as->dispatch = NULL;
1784}
1785
Avi Kivity62152b82011-07-26 14:26:14 +03001786static void memory_map_init(void)
1787{
Anthony Liguori7267c092011-08-20 22:09:37 -05001788 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01001789
1790 assert(ADDR_SPACE_BITS <= 64);
1791
1792 memory_region_init(system_memory, NULL, "system",
1793 ADDR_SPACE_BITS == 64 ?
1794 UINT64_MAX : (0x1ULL << ADDR_SPACE_BITS));
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00001795 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03001796
Anthony Liguori7267c092011-08-20 22:09:37 -05001797 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02001798 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
1799 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00001800 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity93632742012-02-08 16:54:16 +02001801
Avi Kivityf6790af2012-10-02 20:13:51 +02001802 memory_listener_register(&core_memory_listener, &address_space_memory);
liguang26416892013-09-04 14:37:33 +08001803 if (tcg_enabled()) {
1804 memory_listener_register(&tcg_memory_listener, &address_space_memory);
1805 }
Avi Kivity62152b82011-07-26 14:26:14 +03001806}
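
/*
 * Layout note (summary): "system" is the root container under which
 * boards place RAM and MMIO via memory_region_add_subregion() on
 * get_system_memory(); "io" models a 64K port-I/O space, which is
 * mainly meaningful on x86 and stays largely empty elsewhere.
 */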

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID)) {
            return -1;
        }
        if (is_write) {
            if (!(flags & PAGE_WRITE)) {
                return -1;
            }
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0))) {
                return -1;
            }
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ)) {
                return -1;
            }
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1))) {
                return -1;
            }
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else

static void invalidate_and_set_dirty(hwaddr addr,
                                     hwaddr length)
{
    if (!cpu_physical_memory_is_dirty(addr)) {
        /* invalidate code */
        tb_invalidate_phys_page_range(addr, addr + length, 0);
        /* set dirty bit */
        cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
    }
    xen_modified_memory(addr, length);
}

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    if (l & (l - 1)) {
        l = 1 << (qemu_fls(l) - 1);
    }

    return l;
}
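
/*
 * Worked example (illustrative values): for a region declaring
 * valid.max_access_size == 8 and no unaligned support, a 16-byte access
 * at addr 0x1004 is first bounded to 8 bytes by the region, then to
 * 4 bytes by the address alignment (0x1004 & -0x1004 == 4), so
 * address_space_rw() below issues a 4-byte access and loops for the
 * remainder.
 */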
1915
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001916bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02001917 int len, bool is_write)
bellard13eb76e2004-01-24 15:23:36 +00001918{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001919 hwaddr l;
bellard13eb76e2004-01-24 15:23:36 +00001920 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001921 uint64_t val;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001922 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001923 MemoryRegion *mr;
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001924 bool error = false;
ths3b46e622007-09-17 08:09:54 +00001925
bellard13eb76e2004-01-24 15:23:36 +00001926 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001927 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001928 mr = address_space_translate(as, addr, &addr1, &l, is_write);
ths3b46e622007-09-17 08:09:54 +00001929
bellard13eb76e2004-01-24 15:23:36 +00001930 if (is_write) {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001931 if (!memory_access_is_direct(mr, is_write)) {
1932 l = memory_access_size(mr, l, addr1);
Andreas Färber4917cf42013-05-27 05:17:50 +02001933 /* XXX: could force current_cpu to NULL to avoid
bellard6a00d602005-11-21 23:25:50 +00001934 potential bugs */
Richard Henderson23326162013-07-08 14:55:59 -07001935 switch (l) {
1936 case 8:
1937 /* 64 bit write access */
1938 val = ldq_p(buf);
1939 error |= io_mem_write(mr, addr1, val, 8);
1940 break;
1941 case 4:
bellard1c213d12005-09-03 10:49:04 +00001942 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001943 val = ldl_p(buf);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001944 error |= io_mem_write(mr, addr1, val, 4);
Richard Henderson23326162013-07-08 14:55:59 -07001945 break;
1946 case 2:
bellard1c213d12005-09-03 10:49:04 +00001947 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001948 val = lduw_p(buf);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001949 error |= io_mem_write(mr, addr1, val, 2);
Richard Henderson23326162013-07-08 14:55:59 -07001950 break;
1951 case 1:
bellard1c213d12005-09-03 10:49:04 +00001952 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001953 val = ldub_p(buf);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001954 error |= io_mem_write(mr, addr1, val, 1);
Richard Henderson23326162013-07-08 14:55:59 -07001955 break;
1956 default:
1957 abort();
bellard13eb76e2004-01-24 15:23:36 +00001958 }
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02001959 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001960 addr1 += memory_region_get_ram_addr(mr);
bellard13eb76e2004-01-24 15:23:36 +00001961 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00001962 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00001963 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001964 invalidate_and_set_dirty(addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00001965 }
1966 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001967 if (!memory_access_is_direct(mr, is_write)) {
bellard13eb76e2004-01-24 15:23:36 +00001968 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001969 l = memory_access_size(mr, l, addr1);
Richard Henderson23326162013-07-08 14:55:59 -07001970 switch (l) {
1971 case 8:
1972 /* 64 bit read access */
1973 error |= io_mem_read(mr, addr1, &val, 8);
1974 stq_p(buf, val);
1975 break;
1976 case 4:
bellard13eb76e2004-01-24 15:23:36 +00001977 /* 32 bit read access */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001978 error |= io_mem_read(mr, addr1, &val, 4);
bellardc27004e2005-01-03 23:35:10 +00001979 stl_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07001980 break;
1981 case 2:
bellard13eb76e2004-01-24 15:23:36 +00001982 /* 16 bit read access */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001983 error |= io_mem_read(mr, addr1, &val, 2);
bellardc27004e2005-01-03 23:35:10 +00001984 stw_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07001985 break;
1986 case 1:
bellard1c213d12005-09-03 10:49:04 +00001987 /* 8 bit read access */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001988 error |= io_mem_read(mr, addr1, &val, 1);
bellardc27004e2005-01-03 23:35:10 +00001989 stb_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07001990 break;
1991 default:
1992 abort();
bellard13eb76e2004-01-24 15:23:36 +00001993 }
1994 } else {
1995 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001996 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
Avi Kivityf3705d52012-03-08 16:16:34 +02001997 memcpy(buf, ptr, l);
bellard13eb76e2004-01-24 15:23:36 +00001998 }
1999 }
2000 len -= l;
2001 buf += l;
2002 addr += l;
2003 }
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002004
2005 return error;
bellard13eb76e2004-01-24 15:23:36 +00002006}
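
/*
 * Typical call (hypothetical device code): DMA into guest memory goes
 * through this path, e.g.:
 *
 *     uint8_t data[64];
 *     bool err = address_space_rw(&address_space_memory, gpa,
 *                                 data, sizeof(data), true);
 *
 * The result is true if any part of the transfer touched an area that
 * rejected the access, so callers that care must check it.
 */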

bool address_space_write(AddressSpace *as, hwaddr addr,
                         const uint8_t *buf, int len)
{
    return address_space_rw(as, addr, (uint8_t *)buf, len, true);
}

bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
{
    return address_space_rw(as, addr, buf, len, false);
}


void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, buf, len, is_write);
}

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(hwaddr addr,
                                   const uint8_t *buf, int len)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    while (len > 0) {
        l = len;
        mr = address_space_translate(&address_space_memory,
                                     addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            /* do nothing */
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(addr1, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

static void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
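
/*
 * The map-client list exists because address_space_map() can fail
 * transiently while the single bounce buffer is in use.  A caller that
 * can wait registers a callback and retries from it (sketch with a
 * hypothetical retry_dma() helper):
 *
 *     if (!cpu_physical_memory_map(addr, &len, is_write)) {
 *         cpu_register_map_client(s, retry_dma);
 *     }
 *
 * cpu_notify_map_clients() then runs the callbacks once the bounce
 * buffer is released in address_space_unmap().
 */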

bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    return true;
}

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    mr = address_space_translate(as, addr, &xlat, &l, is_write);
    if (!memory_access_is_direct(mr, is_write)) {
        if (bounce.buffer) {
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, bounce.buffer, l);
        }

        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    *plen = done;
    return qemu_ram_ptr_length(raddr + base, plen);
}

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len) {
                    l = access_len;
                }
                invalidate_and_set_dirty(addr1, l);
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    cpu_notify_map_clients();
}
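
/*
 * Map/unmap usage pattern (sketch): the pair brackets zero-copy access
 * to guest memory, with the bounce buffer used transparently for MMIO:
 *
 *     hwaddr plen = len;
 *     void *p = address_space_map(as, gpa, &plen, is_write);
 *     if (p) {
 *         // operate on up to plen bytes at p
 *         address_space_unmap(as, p, plen, is_write, plen);
 *     }
 *
 * *plen may come back smaller than requested, so large transfers must
 * loop; the DMA helpers in sysemu/dma.h build on this.
 */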
bellardd0ecd2a2006-04-23 17:14:48 +00002229
Avi Kivitya8170e52012-10-23 12:30:10 +02002230void *cpu_physical_memory_map(hwaddr addr,
2231 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002232 int is_write)
2233{
2234 return address_space_map(&address_space_memory, addr, plen, is_write);
2235}
2236
Avi Kivitya8170e52012-10-23 12:30:10 +02002237void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2238 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002239{
2240 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2241}
2242
bellard8df1cd02005-01-28 22:37:22 +00002243/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002244static inline uint32_t ldl_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002245 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002246{
bellard8df1cd02005-01-28 22:37:22 +00002247 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002248 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002249 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002250 hwaddr l = 4;
2251 hwaddr addr1;
bellard8df1cd02005-01-28 22:37:22 +00002252
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002253 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2254 false);
2255 if (l < 4 || !memory_access_is_direct(mr, false)) {
bellard8df1cd02005-01-28 22:37:22 +00002256 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002257 io_mem_read(mr, addr1, &val, 4);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002258#if defined(TARGET_WORDS_BIGENDIAN)
2259 if (endian == DEVICE_LITTLE_ENDIAN) {
2260 val = bswap32(val);
2261 }
2262#else
2263 if (endian == DEVICE_BIG_ENDIAN) {
2264 val = bswap32(val);
2265 }
2266#endif
bellard8df1cd02005-01-28 22:37:22 +00002267 } else {
2268 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002269 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002270 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002271 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002272 switch (endian) {
2273 case DEVICE_LITTLE_ENDIAN:
2274 val = ldl_le_p(ptr);
2275 break;
2276 case DEVICE_BIG_ENDIAN:
2277 val = ldl_be_p(ptr);
2278 break;
2279 default:
2280 val = ldl_p(ptr);
2281 break;
2282 }
bellard8df1cd02005-01-28 22:37:22 +00002283 }
2284 return val;
2285}
2286
Avi Kivitya8170e52012-10-23 12:30:10 +02002287uint32_t ldl_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002288{
2289 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2290}
2291
Avi Kivitya8170e52012-10-23 12:30:10 +02002292uint32_t ldl_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002293{
2294 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2295}
2296
Avi Kivitya8170e52012-10-23 12:30:10 +02002297uint32_t ldl_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002298{
2299 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2300}

/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 8);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(hwaddr addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;   /* io_mem_read() fills a uint64_t even for a 2-byte read */
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
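
/* Usage sketch (hypothetical): the target-native 16-bit load is the kind
 * of accessor legacy virtio ring code relies on, e.g. fetching an
 * avail-ring index; the field-offset arithmetic is elided here. */
static inline uint16_t example_vring_avail_idx(hwaddr avail_idx_pa)
{
    return lduw_phys(avail_idx_pa);
}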

/* warning: addr must be aligned.  The RAM page is not masked as dirty
   and the code inside is not invalidated.  This is useful if the dirty
   bits are used to track modified PTEs. */
void stl_phys_notdirty(hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        io_mem_write(mr, addr1, val, 4);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
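
/* Usage sketch (hypothetical): a softmmu page-table walker setting the
 * accessed bit in a guest PTE.  Using stl_phys_notdirty() keeps the
 * update from flipping the page's dirty/code bits, so a guest OS that
 * uses those dirty bits to track modified PTEs still sees only its own
 * writes.  The PTE layout and flag value are illustrative, not any real
 * target's. */
static inline void example_mark_pte_accessed(hwaddr pte_pa)
{
    uint32_t pte = ldl_phys(pte_pa);
    stl_phys_notdirty(pte_pa, pte | 0x20 /* illustrative ACCESSED bit */);
}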

/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        /* I/O case: convert to target-native order before io_mem_write() */
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(mr, addr1, val, 4);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
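
/* Usage sketch (hypothetical): a display device storing a pixel into a
 * guest framebuffer.  Unlike stl_phys_notdirty(), the plain stores call
 * invalidate_and_set_dirty(), so migration and VGA dirty tracking see
 * the page as modified and any translated code on it is invalidated. */
static inline void example_store_pixel(hwaddr fb_pa, uint32_t rgba)
{
    stl_le_phys(fb_pa, rgba);
}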

/* XXX: optimize */
void stb_phys(hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(mr, addr1, val, 2);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(hwaddr addr, uint64_t val)
{
    /* Unlike the narrower stores above, the 64-bit stores swap in place
       and always bounce through cpu_physical_memory_write() rather than
       taking the direct-RAM fast path. */
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
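
/* Usage sketch (hypothetical): a DMA engine model publishing a 64-bit
 * descriptor address into a guest-resident ring whose layout is defined
 * as little-endian.  The ring address and layout are illustrative. */
static inline void example_publish_desc(hwaddr ring_slot_pa, uint64_t desc_pa)
{
    stq_le_phys(ring_slot_pa, desc_pa);
}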

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(cpu, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        } else {
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
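
/* Usage sketch (hypothetical): how a gdbstub-style client reads a value
 * through the guest's virtual address space; the helper above walks the
 * guest page tables one page at a time via cpu_get_phys_page_debug(). */
static inline bool example_debug_read_u32(CPUState *cpu, target_ulong va,
                                          uint32_t *out)
{
    return cpu_memory_rw_debug(cpu, va, (uint8_t *)out, sizeof(*out), 0) == 0;
}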
#endif

#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home, kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
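
/* Usage sketch (hypothetical): picking the explicit-endian accessor that
 * matches the guest for a legacy, guest-native-endian virtio field.
 * This is equivalent to lduw_phys(); real callers live in the virtio and
 * vhost code, not here. */
static inline uint16_t example_virtio_lduw(hwaddr pa)
{
    return virtio_is_big_endian() ? lduw_be_phys(pa) : lduw_le_phys(pa);
}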

#endif

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;

    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    return !(memory_region_is_ram(mr) ||
             memory_region_is_romd(mr));
}
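
/* Usage sketch (hypothetical): guest-memory dump code probing whether a
 * guest-physical page is backed by RAM/ROM before copying it, so MMIO
 * regions are skipped rather than read with side effects. */
static inline bool example_page_is_dumpable(hwaddr pa)
{
    return !cpu_physical_memory_is_io(pa);
}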

void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        func(block->host, block->offset, block->length, opaque);
    }
}
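
/* Usage sketch (hypothetical): totalling guest RAM by walking every
 * RAMBlock; migration backends use the same hook to register each
 * block's host mapping.  The callback follows RAMBlockIterFunc. */
static void example_sum_block(void *host_addr, ram_addr_t offset,
                              ram_addr_t length, void *opaque)
{
    *(uint64_t *)opaque += length;   /* host_addr and offset unused here */
}

static inline uint64_t example_total_ram_bytes(void)
{
    uint64_t total = 0;
    qemu_ram_foreach_block(example_sum_block, &total);
    return total;
}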
#endif