/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

#include "qemu/range.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits to skip to the next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    Node *nodes;
    MemoryRegionSection *sections;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

static PhysPageMap *prev_map;
static PhysPageMap next_map;

static void io_mem_init(void);
static void memory_map_init(void);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (next_map.nodes_nb + nodes > next_map.nodes_nb_alloc) {
        next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc * 2,
                                      16);
        next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc,
                                      next_map.nodes_nb + nodes);
        next_map.nodes = g_renew(Node, next_map.nodes,
                                 next_map.nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(void)
{
    unsigned i;
    uint32_t ret;

    ret = next_map.nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != next_map.nodes_nb_alloc);
    for (i = 0; i < P_L2_SIZE; ++i) {
        next_map.nodes[ret][i].skip = 1;
        next_map.nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = next_map.nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < P_L2_SIZE; i++) {
                p[i].skip = 0;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = next_map.nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non-leaf page entry.  Simply detect that the entry has a single
 * child, and update our entry so we can skip it and go directly to the
 * destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->nodes, compacted);
    }
}

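/* Look up the section covering a physical address: walk down the radix tree
 * from the root entry, consuming lp.skip levels per step, until a leaf is
 * reached.  Addresses that the leaf section does not actually cover resolve
 * to the unassigned section.
 */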
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->nodes, d->sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}

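/* Translate an address within an AddressSpace into the terminal MemoryRegion
 * plus offset, following IOMMU regions (and the address spaces they point to)
 * until a non-IOMMU region is found.  Accesses the IOMMU does not permit are
 * redirected to io_mem_unassigned, and *plen is clamped to the translated
 * range.
 */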
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    *plen = len;
    *xlat = addr;
    return mr;
}

MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

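/* Register a freshly created CPU: assign it the next free cpu_index, add it
 * to the global CPU list, and register its VMState so that it is saved and
 * restored with the rest of the machine.
 */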
void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)

{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
    }

    breakpoint_invalidate(ENV_GET_CPU(env), pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(ENV_GET_CPU(env), breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
#endif
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    CPUState *cpu = ENV_GET_CPU(env);
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    RAMBlock *block;
    ram_addr_t start1;

    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)block->host + (start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

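/* Compute the TLB "iotlb" value for a page: for RAM this is the ram_addr of
 * the page combined with the NOTDIRTY or ROM section index, for MMIO it is
 * the index of the MemoryRegionSection plus the offset within it.  Pages
 * with watchpoints are routed through the watchpoint section so that
 * matching accesses trap.
 */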
hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - address_space_memory.dispatch->sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(next_map.sections_nb < TARGET_PAGE_SIZE);

    if (next_map.sections_nb == next_map.sections_nb_alloc) {
        next_map.sections_nb_alloc = MAX(next_map.sections_nb_alloc * 2,
                                         16);
        next_map.sections = g_renew(MemoryRegionSection, next_map.sections,
                                    next_map.sections_nb_alloc);
    }
    next_map.sections[next_map.sections_nb] = *section;
    memory_region_ref(section->mr);
    return next_map.sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
    g_free(map);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   next_map.nodes, next_map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}


static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

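/* Split a MemoryRegionSection into page-sized pieces as it is added to the
 * dispatch tree: any unaligned head and tail are registered as subpages,
 * the page-aligned middle as full pages.
 */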
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static sigjmp_buf sigjump;

static void sigbus_handler(int signal)
{
    siglongjmp(sigjump, 1);
}

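/* Allocate guest RAM from a file on a hugetlbfs mount (-mem-path).  With
 * -mem-prealloc the pages are touched up front while trapping SIGBUS, so
 * that allocation failures are reported here rather than crashing the guest
 * later.
 */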
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }

    if (mem_prealloc) {
        int ret, i;
        struct sigaction act, oldact;
        sigset_t set, oldset;

        memset(&act, 0, sizeof(act));
        act.sa_handler = &sigbus_handler;
        act.sa_flags = 0;

        ret = sigaction(SIGBUS, &act, &oldact);
        if (ret) {
            perror("file_ram_alloc: failed to install signal handler");
            exit(1);
        }

        /* unblock SIGBUS */
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        pthread_sigmask(SIG_UNBLOCK, &set, &oldset);

        if (sigsetjmp(sigjump, 1)) {
            fprintf(stderr, "file_ram_alloc: failed to preallocate pages\n");
            exit(1);
        }

        /* MAP_POPULATE silently ignores failures */
        for (i = 0; i < (memory/hpagesize)-1; i++) {
            memset(area + (hpagesize*i), 0, 1);
        }

        ret = sigaction(SIGBUS, &oldact, NULL);
        if (ret) {
            perror("file_ram_alloc: failed to reinstall signal handler");
            exit(1);
        }

        pthread_sigmask(SIG_SETMASK, &oldset, NULL);
    }

    block->fd = fd;
    return area;
}
#else
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    fprintf(stderr, "-mem-path not supported on this host\n");
    exit(1);
}
#endif

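/* Find the offset of the smallest gap in the RAM block list that can hold a
 * new block of the requested size (best fit).
 */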
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;

    /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
    if (!qemu_opt_get_bool(qemu_get_machine_opts(),
                           "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->fd = -1;

    /* This assumes the iothread lock is taken here too.  */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else if (xen_enabled()) {
        if (mem_path) {
            fprintf(stderr, "-mem-path not supported with Xen\n");
            exit(1);
        }
        xen_ram_alloc(new_block->offset, size, mr);
    } else {
        if (mem_path) {
            if (phys_mem_alloc != qemu_anon_ram_alloc) {
                /*
                 * file_ram_alloc() needs to allocate just like
                 * phys_mem_alloc, but we haven't bothered to provide
                 * a hook there.
                 */
                fprintf(stderr,
                        "-mem-path not supported with this accelerator\n");
                exit(1);
            }
            new_block->host = file_ram_alloc(new_block, size, mem_path);
        }
1252 if (!new_block->host) {
Markus Armbruster91138032013-07-31 15:11:08 +02001253 new_block->host = phys_mem_alloc(size);
Markus Armbruster39228252013-07-31 15:11:11 +02001254 if (!new_block->host) {
1255 fprintf(stderr, "Cannot set up guest memory '%s': %s\n",
1256 new_block->mr->name, strerror(errno));
1257 exit(1);
1258 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001259 memory_try_enable_merging(new_block->host, size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001260 }
1261 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001262 new_block->length = size;
1263
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001264 /* Keep the list sorted from biggest to smallest block. */
1265 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1266 if (block->length < new_block->length) {
1267 break;
1268 }
1269 }
1270 if (block) {
1271 QTAILQ_INSERT_BEFORE(block, new_block, next);
1272 } else {
1273 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1274 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001275 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001276
Umesh Deshpandef798b072011-08-18 11:41:17 -07001277 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001278 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001279
Anthony Liguori7267c092011-08-20 22:09:37 -05001280 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
Cam Macdonell84b89d72010-07-26 18:10:57 -06001281 last_ram_offset() >> TARGET_PAGE_BITS);
Igor Mitsyanko5fda0432012-08-10 18:45:11 +04001282 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
1283 0, size >> TARGET_PAGE_BITS);
Juan Quintela1720aee2012-06-22 13:14:17 +02001284 cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001285
Jason Baronddb97f12012-08-02 15:44:16 -04001286 qemu_ram_setup_dump(new_block->host, size);
Luiz Capitulinoad0b5322012-10-05 16:47:57 -03001287 qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
Andrea Arcangeli3e469db2013-07-25 12:11:15 +02001288 qemu_madvise(new_block->host, size, QEMU_MADV_DONTFORK);
Jason Baronddb97f12012-08-02 15:44:16 -04001289
Cam Macdonell84b89d72010-07-26 18:10:57 -06001290 if (kvm_enabled())
1291 kvm_setup_guest_memory(new_block->host, size);
1292
1293 return new_block->offset;
1294}
1295
Avi Kivityc5705a72011-12-20 15:59:12 +02001296ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
pbrook94a6b542009-04-11 17:15:54 +00001297{
Avi Kivityc5705a72011-12-20 15:59:12 +02001298 return qemu_ram_alloc_from_ptr(size, NULL, mr);
pbrook94a6b542009-04-11 17:15:54 +00001299}
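/*
 * Illustrative usage sketch (not part of exec.c): boards and devices normally
 * do not call qemu_ram_alloc() directly; they create a RAM-backed
 * MemoryRegion, which allocates through the code above.  The helper names
 * below (memory_region_init_ram, memory_region_add_subregion) are assumed
 * from the MemoryRegion API of this QEMU generation, and the region/board
 * names are invented - treat this as a sketch, not a verbatim board file.
 */
static MemoryRegion example_ram;   /* hypothetical region owned by a board model */

static void example_board_ram_init(MemoryRegion *sysmem, ram_addr_t size)
{
    memory_region_init_ram(&example_ram, NULL, "example-board.ram", size);
    memory_region_add_subregion(sysmem, 0, &example_ram);
}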
bellarde9a1ab12007-02-08 23:08:38 +00001300
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001301void qemu_ram_free_from_ptr(ram_addr_t addr)
1302{
1303 RAMBlock *block;
1304
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001305 /* This assumes the iothread lock is taken here too. */
1306 qemu_mutex_lock_ramlist();
Paolo Bonzinia3161032012-11-14 15:54:48 +01001307 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001308 if (addr == block->offset) {
Paolo Bonzinia3161032012-11-14 15:54:48 +01001309 QTAILQ_REMOVE(&ram_list.blocks, block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001310 ram_list.mru_block = NULL;
Umesh Deshpandef798b072011-08-18 11:41:17 -07001311 ram_list.version++;
Anthony Liguori7267c092011-08-20 22:09:37 -05001312 g_free(block);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001313 break;
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001314 }
1315 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001316 qemu_mutex_unlock_ramlist();
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001317}
1318
Anthony Liguoric227f092009-10-01 16:12:16 -05001319void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00001320{
Alex Williamson04b16652010-07-02 11:13:17 -06001321 RAMBlock *block;
1322
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001323 /* This assumes the iothread lock is taken here too. */
1324 qemu_mutex_lock_ramlist();
Paolo Bonzinia3161032012-11-14 15:54:48 +01001325 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001326 if (addr == block->offset) {
Paolo Bonzinia3161032012-11-14 15:54:48 +01001327 QTAILQ_REMOVE(&ram_list.blocks, block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001328 ram_list.mru_block = NULL;
Umesh Deshpandef798b072011-08-18 11:41:17 -07001329 ram_list.version++;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001330 if (block->flags & RAM_PREALLOC_MASK) {
1331 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001332 } else if (xen_enabled()) {
1333 xen_invalidate_map_cache_entry(block->host);
Stefan Weil089f3f72013-09-18 07:48:15 +02001334#ifndef _WIN32
Markus Armbruster3435f392013-07-31 15:11:07 +02001335 } else if (block->fd >= 0) {
1336 munmap(block->host, block->length);
1337 close(block->fd);
Stefan Weil089f3f72013-09-18 07:48:15 +02001338#endif
Alex Williamson04b16652010-07-02 11:13:17 -06001339 } else {
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001340 qemu_anon_ram_free(block->host, block->length);
Alex Williamson04b16652010-07-02 11:13:17 -06001341 }
Anthony Liguori7267c092011-08-20 22:09:37 -05001342 g_free(block);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001343 break;
Alex Williamson04b16652010-07-02 11:13:17 -06001344 }
1345 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001346 qemu_mutex_unlock_ramlist();
Alex Williamson04b16652010-07-02 11:13:17 -06001347
bellarde9a1ab12007-02-08 23:08:38 +00001348}
1349
Huang Yingcd19cfa2011-03-02 08:56:19 +01001350#ifndef _WIN32
1351void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1352{
1353 RAMBlock *block;
1354 ram_addr_t offset;
1355 int flags;
1356 void *area, *vaddr;
1357
Paolo Bonzinia3161032012-11-14 15:54:48 +01001358 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001359 offset = addr - block->offset;
1360 if (offset < block->length) {
1361 vaddr = block->host + offset;
1362 if (block->flags & RAM_PREALLOC_MASK) {
1363 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001364 } else if (xen_enabled()) {
1365 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001366 } else {
1367 flags = MAP_FIXED;
1368 munmap(vaddr, length);
Markus Armbruster3435f392013-07-31 15:11:07 +02001369 if (block->fd >= 0) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001370#ifdef MAP_POPULATE
Markus Armbruster3435f392013-07-31 15:11:07 +02001371 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1372 MAP_PRIVATE;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001373#else
Markus Armbruster3435f392013-07-31 15:11:07 +02001374 flags |= MAP_PRIVATE;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001375#endif
Markus Armbruster3435f392013-07-31 15:11:07 +02001376 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1377 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001378 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001379 /*
1380 * Remap needs to match alloc. Accelerators that
1381 * set phys_mem_alloc never remap. If they did,
1382 * we'd need a remap hook here.
1383 */
1384 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1385
Huang Yingcd19cfa2011-03-02 08:56:19 +01001386 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1387 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1388 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001389 }
1390 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001391 fprintf(stderr, "Could not remap addr: "
1392 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001393 length, addr);
1394 exit(1);
1395 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001396 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001397 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001398 }
1399 return;
1400 }
1401 }
1402}
1403#endif /* !_WIN32 */
1404
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001405/* Return a host pointer to ram allocated with qemu_ram_alloc.
1406 With the exception of the softmmu code in this file, this should
1407 only be used for local memory (e.g. video ram) that the device owns,
1408 and knows it isn't going to access beyond the end of the block.
1409
1410 It should not be used for general purpose DMA.
1411 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1412 */
1413void *qemu_get_ram_ptr(ram_addr_t addr)
1414{
1415 RAMBlock *block = qemu_get_ram_block(addr);
1416
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001417 if (xen_enabled()) {
1418 /* We need to check if the requested address is in the RAM
1419 * because we don't want to map the entire memory in QEMU.
1420 * In that case just map until the end of the page.
1421 */
1422 if (block->offset == 0) {
1423 return xen_map_cache(addr, 0, 0);
1424 } else if (block->host == NULL) {
1425 block->host =
1426 xen_map_cache(block->offset, block->length, 1);
1427 }
1428 }
1429 return block->host + (addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00001430}
1431
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001432/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1433 * but takes a size argument */
Peter Maydellcb85f7a2013-07-08 09:44:04 +01001434static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001435{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001436 if (*size == 0) {
1437 return NULL;
1438 }
Jan Kiszka868bb332011-06-21 22:59:09 +02001439 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001440 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02001441 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001442 RAMBlock *block;
1443
Paolo Bonzinia3161032012-11-14 15:54:48 +01001444 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001445 if (addr - block->offset < block->length) {
1446 if (addr - block->offset + *size > block->length)
1447 *size = block->length - addr + block->offset;
1448 return block->host + (addr - block->offset);
1449 }
1450 }
1451
1452 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1453 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001454 }
1455}
1456
Paolo Bonzini7443b432013-06-03 12:44:02 +02001457/* Some of the softmmu routines need to translate from a host pointer
1458 (typically a TLB entry) back to a ram offset. */
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001459MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00001460{
pbrook94a6b542009-04-11 17:15:54 +00001461 RAMBlock *block;
1462 uint8_t *host = ptr;
1463
Jan Kiszka868bb332011-06-21 22:59:09 +02001464 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001465 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001466 return qemu_get_ram_block(*ram_addr)->mr;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001467 }
1468
Paolo Bonzini23887b72013-05-06 14:28:39 +02001469 block = ram_list.mru_block;
1470 if (block && block->host && host - block->host < block->length) {
1471 goto found;
1472 }
1473
Paolo Bonzinia3161032012-11-14 15:54:48 +01001474 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001475        /* This case happens when the block is not mapped. */
1476 if (block->host == NULL) {
1477 continue;
1478 }
Alex Williamsonf471a172010-06-11 11:11:42 -06001479 if (host - block->host < block->length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001480 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001481 }
pbrook94a6b542009-04-11 17:15:54 +00001482 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001483
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001484 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001485
1486found:
1487 *ram_addr = block->offset + (host - block->host);
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001488 return block->mr;
Marcelo Tosattie8902612010-10-11 15:31:19 -03001489}
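/*
 * Illustrative sketch (not part of exec.c): the lookup pattern used by
 * qemu_ram_addr_from_host() above - try the most-recently-used block first,
 * then fall back to scanning every block.  Restated over an array so it
 * compiles on its own; all names are invented for the example.
 */
#include <stdint.h>

typedef struct {
    uint8_t *host;      /* host mapping of the block, NULL if not mapped */
    uint64_t length;
    uint64_t offset;    /* guest ram address the block starts at */
} Blk;

static Blk *mru;        /* cache of the last block that matched */

/* Translate a host pointer back to a guest ram offset; -1 if not found. */
static int64_t host_to_ram_offset(Blk *blks, int n, uint8_t *host)
{
    if (mru && mru->host && (uint64_t)(host - mru->host) < mru->length) {
        return mru->offset + (host - mru->host);
    }
    for (int i = 0; i < n; i++) {
        if (blks[i].host && (uint64_t)(host - blks[i].host) < blks[i].length) {
            mru = &blks[i];
            return blks[i].offset + (host - blks[i].host);
        }
    }
    return -1;
}

int main(void)
{
    static uint8_t ram0[4096];
    Blk blks[] = { { ram0, sizeof(ram0), 0x100000 } };

    return host_to_ram_offset(blks, 1, ram0 + 8) == 0x100008 ? 0 : 1;
}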
Alex Williamsonf471a172010-06-11 11:11:42 -06001490
Avi Kivitya8170e52012-10-23 12:30:10 +02001491static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001492 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001493{
bellard3a7d9292005-08-21 09:26:42 +00001494 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001495 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00001496 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001497 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001498 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00001499 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001500 switch (size) {
1501 case 1:
1502 stb_p(qemu_get_ram_ptr(ram_addr), val);
1503 break;
1504 case 2:
1505 stw_p(qemu_get_ram_ptr(ram_addr), val);
1506 break;
1507 case 4:
1508 stl_p(qemu_get_ram_ptr(ram_addr), val);
1509 break;
1510 default:
1511 abort();
1512 }
bellardf23db162005-08-21 19:12:28 +00001513 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001514 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00001515 /* we remove the notdirty callback only if the code has been
1516 flushed */
Andreas Färber4917cf42013-05-27 05:17:50 +02001517 if (dirty_flags == 0xff) {
1518 CPUArchState *env = current_cpu->env_ptr;
1519 tlb_set_dirty(env, env->mem_io_vaddr);
1520 }
bellard1ccde1c2004-02-06 19:46:14 +00001521}
1522
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001523static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1524 unsigned size, bool is_write)
1525{
1526 return is_write;
1527}
1528
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001529static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001530 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001531 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001532 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00001533};
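/*
 * Illustrative sketch (not part of exec.c): the per-page dirty-flag update
 * that notdirty_mem_write() performs, reduced to a standalone byte-per-page
 * table.  The flag value is an invented stand-in for CODE_DIRTY_FLAG; only
 * the "set every dirty bit except the code bit" step mirrors the code above.
 */
#include <stdint.h>

#define EX_PAGE_BITS       12
#define EX_CODE_DIRTY_FLAG 0x02    /* stand-in for CODE_DIRTY_FLAG */

static uint8_t ex_dirty[1024];     /* one flag byte per guest page */

static void ex_mark_written(uint64_t ram_addr)
{
    /* equivalent of dirty_flags |= (0xff & ~CODE_DIRTY_FLAG) above */
    ex_dirty[ram_addr >> EX_PAGE_BITS] |= 0xff & ~EX_CODE_DIRTY_FLAG;
}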
1534
pbrook0f459d12008-06-09 00:20:13 +00001535/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00001536static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00001537{
Andreas Färber4917cf42013-05-27 05:17:50 +02001538 CPUArchState *env = current_cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00001539 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00001540 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00001541 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00001542 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00001543
aliguori06d55cc2008-11-18 20:24:06 +00001544 if (env->watchpoint_hit) {
1545 /* We re-entered the check after replacing the TB. Now raise
1546         * the debug interrupt so that it will trigger after the
1547 * current instruction. */
Andreas Färberc3affe52013-01-18 15:03:43 +01001548 cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00001549 return;
1550 }
pbrook2e70f6e2008-06-29 01:03:05 +00001551 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00001552 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001553 if ((vaddr == (wp->vaddr & len_mask) ||
1554 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00001555 wp->flags |= BP_WATCHPOINT_HIT;
1556 if (!env->watchpoint_hit) {
1557 env->watchpoint_hit = wp;
Blue Swirl5a316522012-12-02 21:28:09 +00001558 tb_check_watchpoint(env);
aliguori6e140f22008-11-18 20:37:55 +00001559 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1560 env->exception_index = EXCP_DEBUG;
Max Filippov488d6572012-01-29 02:24:39 +04001561 cpu_loop_exit(env);
aliguori6e140f22008-11-18 20:37:55 +00001562 } else {
1563 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1564 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
Max Filippov488d6572012-01-29 02:24:39 +04001565 cpu_resume_from_signal(env, NULL);
aliguori6e140f22008-11-18 20:37:55 +00001566 }
aliguori06d55cc2008-11-18 20:24:06 +00001567 }
aliguori6e140f22008-11-18 20:37:55 +00001568 } else {
1569 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00001570 }
1571 }
1572}
1573
pbrook6658ffb2007-03-16 23:58:11 +00001574/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1575 so these check for a hit then pass through to the normal out-of-line
1576 phys routines. */
Avi Kivitya8170e52012-10-23 12:30:10 +02001577static uint64_t watch_mem_read(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001578 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001579{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001580 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1581 switch (size) {
1582 case 1: return ldub_phys(addr);
1583 case 2: return lduw_phys(addr);
1584 case 4: return ldl_phys(addr);
1585 default: abort();
1586 }
pbrook6658ffb2007-03-16 23:58:11 +00001587}
1588
Avi Kivitya8170e52012-10-23 12:30:10 +02001589static void watch_mem_write(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001590 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001591{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001592 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1593 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04001594 case 1:
1595 stb_phys(addr, val);
1596 break;
1597 case 2:
1598 stw_phys(addr, val);
1599 break;
1600 case 4:
1601 stl_phys(addr, val);
1602 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02001603 default: abort();
1604 }
pbrook6658ffb2007-03-16 23:58:11 +00001605}
1606
Avi Kivity1ec9b902012-01-02 12:47:48 +02001607static const MemoryRegionOps watch_mem_ops = {
1608 .read = watch_mem_read,
1609 .write = watch_mem_write,
1610 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00001611};
pbrook6658ffb2007-03-16 23:58:11 +00001612
Avi Kivitya8170e52012-10-23 12:30:10 +02001613static uint64_t subpage_read(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001614 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001615{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001616 subpage_t *subpage = opaque;
1617 uint8_t buf[4];
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001618
blueswir1db7b5422007-05-26 17:36:03 +00001619#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001620 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001621 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00001622#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001623 address_space_read(subpage->as, addr + subpage->base, buf, len);
1624 switch (len) {
1625 case 1:
1626 return ldub_p(buf);
1627 case 2:
1628 return lduw_p(buf);
1629 case 4:
1630 return ldl_p(buf);
1631 default:
1632 abort();
1633 }
blueswir1db7b5422007-05-26 17:36:03 +00001634}
1635
Avi Kivitya8170e52012-10-23 12:30:10 +02001636static void subpage_write(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001637 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001638{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001639 subpage_t *subpage = opaque;
1640 uint8_t buf[4];
1641
blueswir1db7b5422007-05-26 17:36:03 +00001642#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001643 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001644 " value %"PRIx64"\n",
1645 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00001646#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001647 switch (len) {
1648 case 1:
1649 stb_p(buf, value);
1650 break;
1651 case 2:
1652 stw_p(buf, value);
1653 break;
1654 case 4:
1655 stl_p(buf, value);
1656 break;
1657 default:
1658 abort();
1659 }
1660 address_space_write(subpage->as, addr + subpage->base, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00001661}
1662
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001663static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08001664 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001665{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001666 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001667#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001668 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001669 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001670#endif
1671
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001672 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08001673 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001674}
1675
Avi Kivity70c68e42012-01-02 12:32:48 +02001676static const MemoryRegionOps subpage_ops = {
1677 .read = subpage_read,
1678 .write = subpage_write,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001679 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02001680 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00001681};
1682
Anthony Liguoric227f092009-10-01 16:12:16 -05001683static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02001684 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00001685{
1686 int idx, eidx;
1687
1688 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1689 return -1;
1690 idx = SUBPAGE_IDX(start);
1691 eidx = SUBPAGE_IDX(end);
1692#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001693 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
1694 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00001695#endif
blueswir1db7b5422007-05-26 17:36:03 +00001696 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02001697 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00001698 }
1699
1700 return 0;
1701}
1702
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001703static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00001704{
Anthony Liguoric227f092009-10-01 16:12:16 -05001705 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00001706
Anthony Liguori7267c092011-08-20 22:09:37 -05001707 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00001708
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001709 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00001710 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001711 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Avi Kivity70c68e42012-01-02 12:32:48 +02001712 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02001713 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00001714#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001715 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1716 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00001717#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02001718 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00001719
1720 return mmio;
1721}
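/*
 * Illustrative sketch (not part of exec.c): the subpage idea - one guest page
 * whose byte ranges dispatch to different sections - restated as a standalone
 * lookup table.  Names and the section-id type are invented for the example.
 */
#include <stdint.h>

#define EX_PAGE_SIZE 4096

static uint16_t ex_sub_section[EX_PAGE_SIZE];  /* section id per byte in page */

/* register [start, end] within the page as belonging to `section` */
static int ex_subpage_register(uint32_t start, uint32_t end, uint16_t section)
{
    if (start >= EX_PAGE_SIZE || end >= EX_PAGE_SIZE || start > end) {
        return -1;
    }
    for (uint32_t i = start; i <= end; i++) {
        ex_sub_section[i] = section;
    }
    return 0;
}

/* an access at `offset` inside the page is routed to this section */
static uint16_t ex_subpage_lookup(uint32_t offset)
{
    return ex_sub_section[offset % EX_PAGE_SIZE];
}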
1722
Avi Kivity5312bd82012-02-12 18:32:55 +02001723static uint16_t dummy_section(MemoryRegion *mr)
1724{
1725 MemoryRegionSection section = {
1726 .mr = mr,
1727 .offset_within_address_space = 0,
1728 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001729 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02001730 };
1731
1732 return phys_section_add(&section);
1733}
1734
Avi Kivitya8170e52012-10-23 12:30:10 +02001735MemoryRegion *iotlb_to_region(hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02001736{
Paolo Bonzini0475d942013-05-29 12:28:21 +02001737 return address_space_memory.dispatch->sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02001738}
1739
Avi Kivitye9179ce2009-06-14 11:38:52 +03001740static void io_mem_init(void)
1741{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001742 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
1743 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001744 "unassigned", UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001745 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001746 "notdirty", UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001747 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001748 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03001749}
1750
Avi Kivityac1970f2012-10-03 16:22:53 +02001751static void mem_begin(MemoryListener *listener)
1752{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001753 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini00752702013-05-29 12:13:54 +02001754 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1755
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02001756 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02001757 d->as = as;
1758 as->next_dispatch = d;
1759}
1760
1761static void mem_commit(MemoryListener *listener)
1762{
1763 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02001764 AddressSpaceDispatch *cur = as->dispatch;
1765 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02001766
Paolo Bonzini0475d942013-05-29 12:28:21 +02001767 next->nodes = next_map.nodes;
1768 next->sections = next_map.sections;
1769
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02001770 phys_page_compact_all(next, next_map.nodes_nb);
1771
Paolo Bonzini0475d942013-05-29 12:28:21 +02001772 as->dispatch = next;
1773 g_free(cur);
Avi Kivityac1970f2012-10-03 16:22:53 +02001774}
1775
Avi Kivity50c1e142012-02-08 21:36:02 +02001776static void core_begin(MemoryListener *listener)
1777{
Liu Ping Fanb41aac42013-05-29 11:09:17 +02001778 uint16_t n;
1779
Paolo Bonzini60926662013-05-29 12:30:26 +02001780 prev_map = g_new(PhysPageMap, 1);
1781 *prev_map = next_map;
1782
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001783 memset(&next_map, 0, sizeof(next_map));
Liu Ping Fanb41aac42013-05-29 11:09:17 +02001784 n = dummy_section(&io_mem_unassigned);
1785 assert(n == PHYS_SECTION_UNASSIGNED);
1786 n = dummy_section(&io_mem_notdirty);
1787 assert(n == PHYS_SECTION_NOTDIRTY);
1788 n = dummy_section(&io_mem_rom);
1789 assert(n == PHYS_SECTION_ROM);
1790 n = dummy_section(&io_mem_watch);
1791 assert(n == PHYS_SECTION_WATCH);
Avi Kivity50c1e142012-02-08 21:36:02 +02001792}
1793
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001794/* This listener's commit runs after the other AddressSpaceDispatch listeners'.
1795 * All AddressSpaceDispatch instances have switched to the next map.
1796 */
1797static void core_commit(MemoryListener *listener)
1798{
Paolo Bonzini60926662013-05-29 12:30:26 +02001799 phys_sections_free(prev_map);
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001800}
1801
Avi Kivity1d711482012-10-02 18:54:45 +02001802static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02001803{
Andreas Färber182735e2013-05-29 22:29:20 +02001804 CPUState *cpu;
Avi Kivity117712c2012-02-12 21:23:17 +02001805
1806 /* since each CPU stores ram addresses in its TLB cache, we must
1807 reset the modified entries */
1808 /* XXX: slow ! */
Andreas Färberbdc44642013-06-24 23:50:24 +02001809 CPU_FOREACH(cpu) {
Andreas Färber182735e2013-05-29 22:29:20 +02001810 CPUArchState *env = cpu->env_ptr;
1811
Avi Kivity117712c2012-02-12 21:23:17 +02001812 tlb_flush(env, 1);
1813 }
Avi Kivity50c1e142012-02-08 21:36:02 +02001814}
1815
Avi Kivity93632742012-02-08 16:54:16 +02001816static void core_log_global_start(MemoryListener *listener)
1817{
1818 cpu_physical_memory_set_dirty_tracking(1);
1819}
1820
1821static void core_log_global_stop(MemoryListener *listener)
1822{
1823 cpu_physical_memory_set_dirty_tracking(0);
1824}
1825
Avi Kivity93632742012-02-08 16:54:16 +02001826static MemoryListener core_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02001827 .begin = core_begin,
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001828 .commit = core_commit,
Avi Kivity93632742012-02-08 16:54:16 +02001829 .log_global_start = core_log_global_start,
1830 .log_global_stop = core_log_global_stop,
Avi Kivityac1970f2012-10-03 16:22:53 +02001831 .priority = 1,
Avi Kivity93632742012-02-08 16:54:16 +02001832};
1833
Avi Kivity1d711482012-10-02 18:54:45 +02001834static MemoryListener tcg_memory_listener = {
1835 .commit = tcg_commit,
1836};
1837
Avi Kivityac1970f2012-10-03 16:22:53 +02001838void address_space_init_dispatch(AddressSpace *as)
1839{
Paolo Bonzini00752702013-05-29 12:13:54 +02001840 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001841 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02001842 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02001843 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02001844 .region_add = mem_add,
1845 .region_nop = mem_add,
1846 .priority = 0,
1847 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001848 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02001849}
1850
Avi Kivity83f3c252012-10-07 12:59:55 +02001851void address_space_destroy_dispatch(AddressSpace *as)
1852{
1853 AddressSpaceDispatch *d = as->dispatch;
1854
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001855 memory_listener_unregister(&as->dispatch_listener);
Avi Kivity83f3c252012-10-07 12:59:55 +02001856 g_free(d);
1857 as->dispatch = NULL;
1858}
1859
Avi Kivity62152b82011-07-26 14:26:14 +03001860static void memory_map_init(void)
1861{
Anthony Liguori7267c092011-08-20 22:09:37 -05001862 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01001863
Paolo Bonzini57271d62013-11-07 17:14:37 +01001864 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00001865 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03001866
Anthony Liguori7267c092011-08-20 22:09:37 -05001867 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02001868 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
1869 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00001870 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity93632742012-02-08 16:54:16 +02001871
Avi Kivityf6790af2012-10-02 20:13:51 +02001872 memory_listener_register(&core_memory_listener, &address_space_memory);
liguang26416892013-09-04 14:37:33 +08001873 if (tcg_enabled()) {
1874 memory_listener_register(&tcg_memory_listener, &address_space_memory);
1875 }
Avi Kivity62152b82011-07-26 14:26:14 +03001876}
1877
1878MemoryRegion *get_system_memory(void)
1879{
1880 return system_memory;
1881}
1882
Avi Kivity309cb472011-08-08 16:09:03 +03001883MemoryRegion *get_system_io(void)
1884{
1885 return system_io;
1886}
1887
pbrooke2eef172008-06-08 01:09:01 +00001888#endif /* !defined(CONFIG_USER_ONLY) */
1889
bellard13eb76e2004-01-24 15:23:36 +00001890/* physical memory access (slow version, mainly for debug) */
1891#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02001892int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00001893 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00001894{
1895 int l, flags;
1896 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00001897 void * p;
bellard13eb76e2004-01-24 15:23:36 +00001898
1899 while (len > 0) {
1900 page = addr & TARGET_PAGE_MASK;
1901 l = (page + TARGET_PAGE_SIZE) - addr;
1902 if (l > len)
1903 l = len;
1904 flags = page_get_flags(page);
1905 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00001906 return -1;
bellard13eb76e2004-01-24 15:23:36 +00001907 if (is_write) {
1908 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00001909 return -1;
bellard579a97f2007-11-11 14:26:47 +00001910 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001911 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00001912 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001913 memcpy(p, buf, l);
1914 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00001915 } else {
1916 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00001917 return -1;
bellard579a97f2007-11-11 14:26:47 +00001918 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001919 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00001920 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001921 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00001922 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00001923 }
1924 len -= l;
1925 buf += l;
1926 addr += l;
1927 }
Paul Brooka68fe892010-03-01 00:08:59 +00001928 return 0;
bellard13eb76e2004-01-24 15:23:36 +00001929}
bellard8df1cd02005-01-28 22:37:22 +00001930
bellard13eb76e2004-01-24 15:23:36 +00001931#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001932
Avi Kivitya8170e52012-10-23 12:30:10 +02001933static void invalidate_and_set_dirty(hwaddr addr,
1934 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001935{
1936 if (!cpu_physical_memory_is_dirty(addr)) {
1937 /* invalidate code */
1938 tb_invalidate_phys_page_range(addr, addr + length, 0);
1939 /* set dirty bit */
1940 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1941 }
Anthony PERARDe2269392012-10-03 13:49:22 +00001942 xen_modified_memory(addr, length);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001943}
1944
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02001945static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
1946{
1947 if (memory_region_is_ram(mr)) {
1948 return !(is_write && mr->readonly);
1949 }
1950 if (memory_region_is_romd(mr)) {
1951 return !is_write;
1952 }
1953
1954 return false;
1955}
1956
Richard Henderson23326162013-07-08 14:55:59 -07001957static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02001958{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02001959 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07001960
1961 /* Regions are assumed to support 1-4 byte accesses unless
1962 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07001963 if (access_size_max == 0) {
1964 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02001965 }
Richard Henderson23326162013-07-08 14:55:59 -07001966
1967 /* Bound the maximum access by the alignment of the address. */
1968 if (!mr->ops->impl.unaligned) {
1969 unsigned align_size_max = addr & -addr;
1970 if (align_size_max != 0 && align_size_max < access_size_max) {
1971 access_size_max = align_size_max;
1972 }
1973 }
1974
1975 /* Don't attempt accesses larger than the maximum. */
1976 if (l > access_size_max) {
1977 l = access_size_max;
1978 }
Paolo Bonzini098178f2013-07-29 14:27:39 +02001979 if (l & (l - 1)) {
1980 l = 1 << (qemu_fls(l) - 1);
1981 }
Richard Henderson23326162013-07-08 14:55:59 -07001982
1983 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02001984}
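/*
 * Illustrative sketch (not part of exec.c): the clamping performed by
 * memory_access_size() above, standalone.  `addr & -addr` isolates the lowest
 * set bit of the address, i.e. its natural alignment, which bounds the access
 * size for regions that do not tolerate unaligned accesses.
 */
#include <stdint.h>

static unsigned clamp_access_size(unsigned l, uint64_t addr,
                                  unsigned max_access, int allow_unaligned)
{
    if (max_access == 0) {
        max_access = 4;                  /* same 1-4 byte default as above */
    }
    if (!allow_unaligned) {
        uint64_t align = addr & -addr;   /* 0 for addr == 0: no constraint */
        if (align != 0 && align < max_access) {
            max_access = (unsigned)align;
        }
    }
    if (l > max_access) {
        l = max_access;
    }
    if (l & (l - 1)) {                   /* round down to a power of two */
        unsigned p = 1;
        while (p * 2 <= l) {
            p *= 2;
        }
        l = p;
    }
    return l;
}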
1985
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001986bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02001987 int len, bool is_write)
bellard13eb76e2004-01-24 15:23:36 +00001988{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001989 hwaddr l;
bellard13eb76e2004-01-24 15:23:36 +00001990 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001991 uint64_t val;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001992 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001993 MemoryRegion *mr;
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001994 bool error = false;
ths3b46e622007-09-17 08:09:54 +00001995
bellard13eb76e2004-01-24 15:23:36 +00001996 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001997 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001998 mr = address_space_translate(as, addr, &addr1, &l, is_write);
ths3b46e622007-09-17 08:09:54 +00001999
bellard13eb76e2004-01-24 15:23:36 +00002000 if (is_write) {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002001 if (!memory_access_is_direct(mr, is_write)) {
2002 l = memory_access_size(mr, l, addr1);
Andreas Färber4917cf42013-05-27 05:17:50 +02002003 /* XXX: could force current_cpu to NULL to avoid
bellard6a00d602005-11-21 23:25:50 +00002004 potential bugs */
Richard Henderson23326162013-07-08 14:55:59 -07002005 switch (l) {
2006 case 8:
2007 /* 64 bit write access */
2008 val = ldq_p(buf);
2009 error |= io_mem_write(mr, addr1, val, 8);
2010 break;
2011 case 4:
bellard1c213d12005-09-03 10:49:04 +00002012 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002013 val = ldl_p(buf);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002014 error |= io_mem_write(mr, addr1, val, 4);
Richard Henderson23326162013-07-08 14:55:59 -07002015 break;
2016 case 2:
bellard1c213d12005-09-03 10:49:04 +00002017 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002018 val = lduw_p(buf);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002019 error |= io_mem_write(mr, addr1, val, 2);
Richard Henderson23326162013-07-08 14:55:59 -07002020 break;
2021 case 1:
bellard1c213d12005-09-03 10:49:04 +00002022 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002023 val = ldub_p(buf);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002024 error |= io_mem_write(mr, addr1, val, 1);
Richard Henderson23326162013-07-08 14:55:59 -07002025 break;
2026 default:
2027 abort();
bellard13eb76e2004-01-24 15:23:36 +00002028 }
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02002029 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002030 addr1 += memory_region_get_ram_addr(mr);
bellard13eb76e2004-01-24 15:23:36 +00002031 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002032 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00002033 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002034 invalidate_and_set_dirty(addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002035 }
2036 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002037 if (!memory_access_is_direct(mr, is_write)) {
bellard13eb76e2004-01-24 15:23:36 +00002038 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002039 l = memory_access_size(mr, l, addr1);
Richard Henderson23326162013-07-08 14:55:59 -07002040 switch (l) {
2041 case 8:
2042 /* 64 bit read access */
2043 error |= io_mem_read(mr, addr1, &val, 8);
2044 stq_p(buf, val);
2045 break;
2046 case 4:
bellard13eb76e2004-01-24 15:23:36 +00002047 /* 32 bit read access */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002048 error |= io_mem_read(mr, addr1, &val, 4);
bellardc27004e2005-01-03 23:35:10 +00002049 stl_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002050 break;
2051 case 2:
bellard13eb76e2004-01-24 15:23:36 +00002052 /* 16 bit read access */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002053 error |= io_mem_read(mr, addr1, &val, 2);
bellardc27004e2005-01-03 23:35:10 +00002054 stw_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002055 break;
2056 case 1:
bellard1c213d12005-09-03 10:49:04 +00002057 /* 8 bit read access */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002058 error |= io_mem_read(mr, addr1, &val, 1);
bellardc27004e2005-01-03 23:35:10 +00002059 stb_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002060 break;
2061 default:
2062 abort();
bellard13eb76e2004-01-24 15:23:36 +00002063 }
2064 } else {
2065 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002066 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
Avi Kivityf3705d52012-03-08 16:16:34 +02002067 memcpy(buf, ptr, l);
bellard13eb76e2004-01-24 15:23:36 +00002068 }
2069 }
2070 len -= l;
2071 buf += l;
2072 addr += l;
2073 }
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002074
2075 return error;
bellard13eb76e2004-01-24 15:23:36 +00002076}
bellard8df1cd02005-01-28 22:37:22 +00002077
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002078bool address_space_write(AddressSpace *as, hwaddr addr,
Avi Kivityac1970f2012-10-03 16:22:53 +02002079 const uint8_t *buf, int len)
2080{
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002081 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
Avi Kivityac1970f2012-10-03 16:22:53 +02002082}
2083
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002084bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002085{
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002086 return address_space_rw(as, addr, buf, len, false);
Avi Kivityac1970f2012-10-03 16:22:53 +02002087}
2088
2089
Avi Kivitya8170e52012-10-23 12:30:10 +02002090void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002091 int len, int is_write)
2092{
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002093 address_space_rw(&address_space_memory, addr, buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002094}
2095
bellardd0ecd2a2006-04-23 17:14:48 +00002096/* used for ROM loading: can write in RAM and ROM */
Avi Kivitya8170e52012-10-23 12:30:10 +02002097void cpu_physical_memory_write_rom(hwaddr addr,
bellardd0ecd2a2006-04-23 17:14:48 +00002098 const uint8_t *buf, int len)
2099{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002100 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002101 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002102 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002103 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002104
bellardd0ecd2a2006-04-23 17:14:48 +00002105 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002106 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002107 mr = address_space_translate(&address_space_memory,
2108 addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002109
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002110 if (!(memory_region_is_ram(mr) ||
2111 memory_region_is_romd(mr))) {
bellardd0ecd2a2006-04-23 17:14:48 +00002112 /* do nothing */
2113 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002114 addr1 += memory_region_get_ram_addr(mr);
bellardd0ecd2a2006-04-23 17:14:48 +00002115 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002116 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002117 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002118 invalidate_and_set_dirty(addr1, l);
bellardd0ecd2a2006-04-23 17:14:48 +00002119 }
2120 len -= l;
2121 buf += l;
2122 addr += l;
2123 }
2124}
2125
aliguori6d16c2f2009-01-22 16:59:11 +00002126typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002127 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002128 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002129 hwaddr addr;
2130 hwaddr len;
aliguori6d16c2f2009-01-22 16:59:11 +00002131} BounceBuffer;
2132
2133static BounceBuffer bounce;
2134
aliguoriba223c22009-01-22 16:59:16 +00002135typedef struct MapClient {
2136 void *opaque;
2137 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00002138 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002139} MapClient;
2140
Blue Swirl72cf2d42009-09-12 07:36:22 +00002141static QLIST_HEAD(map_client_list, MapClient) map_client_list
2142 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002143
2144void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2145{
Anthony Liguori7267c092011-08-20 22:09:37 -05002146 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00002147
2148 client->opaque = opaque;
2149 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002150 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00002151 return client;
2152}
2153
Blue Swirl8b9c99d2012-10-28 11:04:51 +00002154static void cpu_unregister_map_client(void *_client)
aliguoriba223c22009-01-22 16:59:16 +00002155{
2156 MapClient *client = (MapClient *)_client;
2157
Blue Swirl72cf2d42009-09-12 07:36:22 +00002158 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002159 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002160}
2161
2162static void cpu_notify_map_clients(void)
2163{
2164 MapClient *client;
2165
Blue Swirl72cf2d42009-09-12 07:36:22 +00002166 while (!QLIST_EMPTY(&map_client_list)) {
2167 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002168 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09002169 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00002170 }
2171}
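/*
 * Illustrative sketch (not part of exec.c): how a caller is expected to pair
 * address_space_map() with cpu_register_map_client().  When the single bounce
 * buffer is busy the map returns NULL; the caller registers a callback and
 * retries once cpu_notify_map_clients() fires.  The example_dma_* functions,
 * the address and the length are hypothetical; only the map/register calls
 * come from this file.
 */
static void example_dma_start(void *dev);

/* invoked from cpu_notify_map_clients() once a mapping has been torn down */
static void example_dma_retry(void *opaque)
{
    example_dma_start(opaque);
}

static void example_dma_start(void *dev)
{
    hwaddr addr = 0x1000, len = 4096;
    void *p = address_space_map(&address_space_memory, addr, &len, true);

    if (p == NULL) {
        /* bounce buffer in use: ask to be called back, then bail out */
        cpu_register_map_client(dev, example_dma_retry);
        return;
    }
    /* ... fill p with up to len bytes, then ...
       address_space_unmap(&address_space_memory, p, len, true, len); */
}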
2172
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002173bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2174{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002175 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002176 hwaddr l, xlat;
2177
2178 while (len > 0) {
2179 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002180 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2181 if (!memory_access_is_direct(mr, is_write)) {
2182 l = memory_access_size(mr, l, addr);
2183 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002184 return false;
2185 }
2186 }
2187
2188 len -= l;
2189 addr += l;
2190 }
2191 return true;
2192}
2193
aliguori6d16c2f2009-01-22 16:59:11 +00002194/* Map a physical memory region into a host virtual address.
2195 * May map a subset of the requested range, given by and returned in *plen.
2196 * May return NULL if resources needed to perform the mapping are exhausted.
2197 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002198 * Use cpu_register_map_client() to know when retrying the map operation is
2199 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002200 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002201void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002202 hwaddr addr,
2203 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002204 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002205{
Avi Kivitya8170e52012-10-23 12:30:10 +02002206 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002207 hwaddr done = 0;
2208 hwaddr l, xlat, base;
2209 MemoryRegion *mr, *this_mr;
2210 ram_addr_t raddr;
aliguori6d16c2f2009-01-22 16:59:11 +00002211
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002212 if (len == 0) {
2213 return NULL;
2214 }
aliguori6d16c2f2009-01-22 16:59:11 +00002215
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002216 l = len;
2217 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2218 if (!memory_access_is_direct(mr, is_write)) {
2219 if (bounce.buffer) {
2220 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002221 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002222 /* Avoid unbounded allocations */
2223 l = MIN(l, TARGET_PAGE_SIZE);
2224 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002225 bounce.addr = addr;
2226 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002227
2228 memory_region_ref(mr);
2229 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002230 if (!is_write) {
2231 address_space_read(as, addr, bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002232 }
aliguori6d16c2f2009-01-22 16:59:11 +00002233
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002234 *plen = l;
2235 return bounce.buffer;
2236 }
2237
2238 base = xlat;
2239 raddr = memory_region_get_ram_addr(mr);
2240
2241 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00002242 len -= l;
2243 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002244 done += l;
2245 if (len == 0) {
2246 break;
2247 }
2248
2249 l = len;
2250 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2251 if (this_mr != mr || xlat != base + done) {
2252 break;
2253 }
aliguori6d16c2f2009-01-22 16:59:11 +00002254 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002255
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002256 memory_region_ref(mr);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002257 *plen = done;
2258 return qemu_ram_ptr_length(raddr + base, plen);
aliguori6d16c2f2009-01-22 16:59:11 +00002259}
2260
Avi Kivityac1970f2012-10-03 16:22:53 +02002261/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002262 * Will also mark the memory as dirty if is_write == 1. access_len gives
2263 * the amount of memory that was actually read or written by the caller.
2264 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002265void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2266 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002267{
2268 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002269 MemoryRegion *mr;
2270 ram_addr_t addr1;
2271
2272 mr = qemu_ram_addr_from_host(buffer, &addr1);
2273 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00002274 if (is_write) {
aliguori6d16c2f2009-01-22 16:59:11 +00002275 while (access_len) {
2276 unsigned l;
2277 l = TARGET_PAGE_SIZE;
2278 if (l > access_len)
2279 l = access_len;
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002280 invalidate_and_set_dirty(addr1, l);
aliguori6d16c2f2009-01-22 16:59:11 +00002281 addr1 += l;
2282 access_len -= l;
2283 }
2284 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002285 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002286 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002287 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002288 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00002289 return;
2290 }
2291 if (is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002292 address_space_write(as, bounce.addr, bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002293 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002294 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002295 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002296 memory_region_unref(bounce.mr);
aliguoriba223c22009-01-22 16:59:16 +00002297 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002298}
bellardd0ecd2a2006-04-23 17:14:48 +00002299
Avi Kivitya8170e52012-10-23 12:30:10 +02002300void *cpu_physical_memory_map(hwaddr addr,
2301 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002302 int is_write)
2303{
2304 return address_space_map(&address_space_memory, addr, plen, is_write);
2305}
2306
Avi Kivitya8170e52012-10-23 12:30:10 +02002307void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2308 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002309{
2310 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2311}
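/*
 * Illustrative usage sketch (not part of exec.c): the map/unmap pairing the
 * comments above prescribe for DMA-style access, using the wrappers declared
 * in this file.  The function name, guest address and size are invented for
 * the example.
 */
static void example_dma_write(const uint8_t *data, hwaddr guest_addr,
                              hwaddr size)
{
    hwaddr len = size;
    void *host = cpu_physical_memory_map(guest_addr, &len, 1 /* is_write */);

    if (!host) {
        return;                 /* resources exhausted; caller retries later */
    }
    memcpy(host, data, len);    /* len may have been reduced below size */
    cpu_physical_memory_unmap(host, len, 1 /* is_write */, len);
}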
2312
/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

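/* Public 32-bit physical loads in native, little- and big-endian order. */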
uint32_t ldl_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 8);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(hwaddr addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

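/* Single-byte physical load; no alignment requirement. */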
/* XXX: optimize */
uint32_t ldub_phys(hwaddr addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        io_mem_read(mr, addr1, &val, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned.  The ram page is not marked as dirty
   and the code inside is not invalidated.  It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        io_mem_write(mr, addr1, val, 4);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}

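/* Common store path for the st*_phys accessors below: byte-swap the value
 * before io_mem_write when the requested endianness differs from the
 * target's, or store directly into RAM and mark the page dirty (discarding
 * any translated code there) via invalidate_and_set_dirty.
 * stw_phys_internal mirrors this for 16-bit stores. */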
/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(mr, addr1, val, 4);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

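/* Single-byte physical store; no alignment requirement. */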
/* XXX: optimize */
void stb_phys(hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;

    mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
                                 true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(mr, addr1, val, 2);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

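/* 64-bit physical stores: the value is converted in place to target,
 * little- or big-endian byte order and then written out through
 * cpu_physical_memory_write. */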
/* XXX: optimize */
void stq_phys(hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

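/* Debug accessor (used e.g. by the gdb stub): walk the guest virtual
 * address range page by page, translating each page with
 * cpu_get_phys_page_debug and then accessing the backing physical memory.
 * Writes go through cpu_physical_memory_write_rom so ROM contents can be
 * patched as well. */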
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(cpu, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif

#if !defined(CONFIG_USER_ONLY)

/*
 * A helper function for the _utterly broken_ virtio device model to find
 * out if it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif

#ifndef CONFIG_USER_ONLY
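/* Return true if the given guest physical address is backed by an I/O
 * (MMIO) region rather than by RAM or a ROM device in ROMD mode. */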
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;

    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    return !(memory_region_is_ram(mr) ||
             memory_region_is_romd(mr));
}

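/* Invoke func once for every RAMBlock currently on ram_list, passing the
 * block's host pointer, its ram_addr_t offset and its length. */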
void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        func(block->host, block->offset, block->length, opaque);
    }
}
#endif