/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

#include "qemu/range.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS

#define P_L2_BITS 10
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
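/* For instance, with ADDR_SPACE_BITS = 52 and TARGET_PAGE_BITS = 12 this
 * works out to (52 - 12 - 1) / 10 + 1 = 4 levels (integer division).
 */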

typedef PhysPageEntry Node[P_L2_SIZE];

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    Node *nodes;
    MemoryRegionSection *sections;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

static PhysPageMap *prev_map;
static PhysPageMap next_map;

static void io_mem_init(void);
static void memory_map_init(void);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (next_map.nodes_nb + nodes > next_map.nodes_nb_alloc) {
        next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc * 2,
                                      16);
        next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc,
                                      next_map.nodes_nb + nodes);
        next_map.nodes = g_renew(Node, next_map.nodes,
                                 next_map.nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(void)
{
    unsigned i;
    uint32_t ret;

    ret = next_map.nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != next_map.nodes_nb_alloc);
    for (i = 0; i < P_L2_SIZE; ++i) {
        next_map.nodes[ret][i].skip = 1;
        next_map.nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

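/* Fill the radix tree below *lp so that *nb pages starting at *index map to
 * section number 'leaf'.  Intermediate nodes are allocated on demand from
 * next_map; entries on the bottom level start out as PHYS_SECTION_UNASSIGNED.
 */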
static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = next_map.nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < P_L2_SIZE; i++) {
                p[i].skip = 0;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = next_map.nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->nodes, compacted);
    }
}

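/* Look up the section covering 'addr': walk down the radix tree, consuming
 * 'skip' levels per node, and fall back to the unassigned section when the
 * walk hits PHYS_MAP_NODE_NIL or the compacted leaf does not cover the byte.
 */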
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->nodes, d->sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}

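/* Translate 'addr' inside 'as' into a MemoryRegion and an offset within it,
 * iterating through any IOMMUs on the path.  *plen is clamped so the result
 * does not cross a section or an IOMMU page, and accesses the IOMMU forbids
 * are redirected to io_mem_unassigned.
 */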
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    *plen = len;
    *xlat = addr;
    return mr;
}

MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
    }

    breakpoint_invalidate(ENV_GET_CPU(env), pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(ENV_GET_CPU(env), breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
#endif
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    CPUState *cpu = ENV_GET_CPU(env);
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here. */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    RAMBlock *block;
    ram_addr_t start1;

    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)block->host + (start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - address_space_memory.dispatch->sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t))
{
    phys_mem_alloc = alloc;
}

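/* Append a copy of *section to the table in next_map, growing the array as
 * needed, take a reference on its MemoryRegion, and return the new index.
 */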
static uint16_t phys_section_add(MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(next_map.sections_nb < TARGET_PAGE_SIZE);

    if (next_map.sections_nb == next_map.sections_nb_alloc) {
        next_map.sections_nb_alloc = MAX(next_map.sections_nb_alloc * 2,
                                         16);
        next_map.sections = g_renew(MemoryRegionSection, next_map.sections,
                                    next_map.sections_nb_alloc);
    }
    next_map.sections[next_map.sections_nb] = *section;
    memory_region_ref(section->mr);
    return next_map.sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
    g_free(map);
}

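/* Register a section that does not cover a whole target page: route it
 * through a subpage_t so several sections can share the same page, creating
 * the subpage on first use.
 */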
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   next_map.nodes, next_map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}


static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

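/* MemoryListener callback that adds a section to the address space's pending
 * dispatch map: split it into an unaligned head, a run of whole target pages
 * and an unaligned tail, registering each piece as a subpage or multipage
 * mapping.
 */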
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static sigjmp_buf sigjump;

static void sigbus_handler(int signal)
{
    siglongjmp(sigjump, 1);
}

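/* Back the block with a file on a hugetlbfs mount at 'path': create and
 * immediately unlink a temporary file, mmap it, and, when preallocation is
 * requested (mem_prealloc), pre-touch the backing pages one by one, treating
 * SIGBUS as an allocation failure.
 */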
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }

    if (mem_prealloc) {
        int ret, i;
        struct sigaction act, oldact;
        sigset_t set, oldset;

        memset(&act, 0, sizeof(act));
        act.sa_handler = &sigbus_handler;
        act.sa_flags = 0;

        ret = sigaction(SIGBUS, &act, &oldact);
        if (ret) {
            perror("file_ram_alloc: failed to install signal handler");
            exit(1);
        }

        /* unblock SIGBUS */
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        pthread_sigmask(SIG_UNBLOCK, &set, &oldset);

        if (sigsetjmp(sigjump, 1)) {
            fprintf(stderr, "file_ram_alloc: failed to preallocate pages\n");
            exit(1);
        }

        /* MAP_POPULATE silently ignores failures */
        for (i = 0; i < (memory/hpagesize)-1; i++) {
            memset(area + (hpagesize*i), 0, 1);
        }

        ret = sigaction(SIGBUS, &oldact, NULL);
        if (ret) {
            perror("file_ram_alloc: failed to reinstall signal handler");
            exit(1);
        }

        pthread_sigmask(SIG_SETMASK, &oldset, NULL);
    }

    block->fd = fd;
    return area;
}
#else
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    fprintf(stderr, "-mem-path not supported on this host\n");
    exit(1);
}
#endif

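/* Choose an offset for a new RAM block: scan the existing blocks and return
 * the start of the smallest gap that can hold 'size' bytes.
 */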
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!qemu_opt_get_bool(qemu_get_machine_opts(),
                           "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too. */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->fd = -1;

    /* This assumes the iothread lock is taken here too. */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else if (xen_enabled()) {
        if (mem_path) {
            fprintf(stderr, "-mem-path not supported with Xen\n");
            exit(1);
        }
        xen_ram_alloc(new_block->offset, size, mr);
    } else {
        if (mem_path) {
            if (phys_mem_alloc != qemu_anon_ram_alloc) {
                /*
                 * file_ram_alloc() needs to allocate just like
                 * phys_mem_alloc, but we haven't bothered to provide
                 * a hook there.
                 */
                fprintf(stderr,
                        "-mem-path not supported with this accelerator\n");
                exit(1);
            }
            new_block->host = file_ram_alloc(new_block, size, mem_path);
        }
        if (!new_block->host) {
            new_block->host = phys_mem_alloc(size);
Markus Armbruster39228252013-07-31 15:11:11 +02001254 if (!new_block->host) {
1255 fprintf(stderr, "Cannot set up guest memory '%s': %s\n",
1256 new_block->mr->name, strerror(errno));
1257 exit(1);
1258 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001259 memory_try_enable_merging(new_block->host, size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001260 }
1261 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001262 new_block->length = size;
1263
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001264 /* Keep the list sorted from biggest to smallest block. */
1265 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1266 if (block->length < new_block->length) {
1267 break;
1268 }
1269 }
1270 if (block) {
1271 QTAILQ_INSERT_BEFORE(block, new_block, next);
1272 } else {
1273 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1274 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001275 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001276
Umesh Deshpandef798b072011-08-18 11:41:17 -07001277 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001278 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001279
Anthony Liguori7267c092011-08-20 22:09:37 -05001280 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
Cam Macdonell84b89d72010-07-26 18:10:57 -06001281 last_ram_offset() >> TARGET_PAGE_BITS);
Igor Mitsyanko5fda0432012-08-10 18:45:11 +04001282 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
1283 0, size >> TARGET_PAGE_BITS);
Juan Quintela1720aee2012-06-22 13:14:17 +02001284 cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001285
Jason Baronddb97f12012-08-02 15:44:16 -04001286 qemu_ram_setup_dump(new_block->host, size);
Luiz Capitulinoad0b5322012-10-05 16:47:57 -03001287 qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
Andrea Arcangeli3e469db2013-07-25 12:11:15 +02001288 qemu_madvise(new_block->host, size, QEMU_MADV_DONTFORK);
Jason Baronddb97f12012-08-02 15:44:16 -04001289
Cam Macdonell84b89d72010-07-26 18:10:57 -06001290 if (kvm_enabled())
1291 kvm_setup_guest_memory(new_block->host, size);
1292
1293 return new_block->offset;
1294}
1295
Avi Kivityc5705a72011-12-20 15:59:12 +02001296ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
pbrook94a6b542009-04-11 17:15:54 +00001297{
Avi Kivityc5705a72011-12-20 15:59:12 +02001298 return qemu_ram_alloc_from_ptr(size, NULL, mr);
pbrook94a6b542009-04-11 17:15:54 +00001299}
bellarde9a1ab12007-02-08 23:08:38 +00001300
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001301void qemu_ram_free_from_ptr(ram_addr_t addr)
1302{
1303 RAMBlock *block;
1304
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001305 /* This assumes the iothread lock is taken here too. */
1306 qemu_mutex_lock_ramlist();
Paolo Bonzinia3161032012-11-14 15:54:48 +01001307 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001308 if (addr == block->offset) {
Paolo Bonzinia3161032012-11-14 15:54:48 +01001309 QTAILQ_REMOVE(&ram_list.blocks, block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001310 ram_list.mru_block = NULL;
Umesh Deshpandef798b072011-08-18 11:41:17 -07001311 ram_list.version++;
Anthony Liguori7267c092011-08-20 22:09:37 -05001312 g_free(block);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001313 break;
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001314 }
1315 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001316 qemu_mutex_unlock_ramlist();
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001317}
1318
Anthony Liguoric227f092009-10-01 16:12:16 -05001319void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00001320{
Alex Williamson04b16652010-07-02 11:13:17 -06001321 RAMBlock *block;
1322
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001323 /* This assumes the iothread lock is taken here too. */
1324 qemu_mutex_lock_ramlist();
Paolo Bonzinia3161032012-11-14 15:54:48 +01001325 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001326 if (addr == block->offset) {
Paolo Bonzinia3161032012-11-14 15:54:48 +01001327 QTAILQ_REMOVE(&ram_list.blocks, block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001328 ram_list.mru_block = NULL;
Umesh Deshpandef798b072011-08-18 11:41:17 -07001329 ram_list.version++;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001330 if (block->flags & RAM_PREALLOC_MASK) {
1331 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001332 } else if (xen_enabled()) {
1333 xen_invalidate_map_cache_entry(block->host);
Stefan Weil089f3f72013-09-18 07:48:15 +02001334#ifndef _WIN32
Markus Armbruster3435f392013-07-31 15:11:07 +02001335 } else if (block->fd >= 0) {
1336 munmap(block->host, block->length);
1337 close(block->fd);
Stefan Weil089f3f72013-09-18 07:48:15 +02001338#endif
Alex Williamson04b16652010-07-02 11:13:17 -06001339 } else {
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001340 qemu_anon_ram_free(block->host, block->length);
Alex Williamson04b16652010-07-02 11:13:17 -06001341 }
Anthony Liguori7267c092011-08-20 22:09:37 -05001342 g_free(block);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001343 break;
Alex Williamson04b16652010-07-02 11:13:17 -06001344 }
1345 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001346 qemu_mutex_unlock_ramlist();
Alex Williamson04b16652010-07-02 11:13:17 -06001347
bellarde9a1ab12007-02-08 23:08:38 +00001348}
1349
Huang Yingcd19cfa2011-03-02 08:56:19 +01001350#ifndef _WIN32
1351void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1352{
1353 RAMBlock *block;
1354 ram_addr_t offset;
1355 int flags;
1356 void *area, *vaddr;
1357
Paolo Bonzinia3161032012-11-14 15:54:48 +01001358 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001359 offset = addr - block->offset;
1360 if (offset < block->length) {
1361 vaddr = block->host + offset;
1362 if (block->flags & RAM_PREALLOC_MASK) {
1363 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001364 } else if (xen_enabled()) {
1365 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001366 } else {
1367 flags = MAP_FIXED;
1368 munmap(vaddr, length);
Markus Armbruster3435f392013-07-31 15:11:07 +02001369 if (block->fd >= 0) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001370#ifdef MAP_POPULATE
Markus Armbruster3435f392013-07-31 15:11:07 +02001371 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1372 MAP_PRIVATE;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001373#else
Markus Armbruster3435f392013-07-31 15:11:07 +02001374 flags |= MAP_PRIVATE;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001375#endif
Markus Armbruster3435f392013-07-31 15:11:07 +02001376 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1377 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001378 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001379 /*
1380 * Remap needs to match alloc. Accelerators that
1381 * set phys_mem_alloc never remap. If they did,
1382 * we'd need a remap hook here.
1383 */
1384 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1385
Huang Yingcd19cfa2011-03-02 08:56:19 +01001386 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1387 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1388 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001389 }
1390 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001391 fprintf(stderr, "Could not remap addr: "
1392 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001393 length, addr);
1394 exit(1);
1395 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001396 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001397 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001398 }
1399 return;
1400 }
1401 }
1402}
1403#endif /* !_WIN32 */
1404
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001405/* Return a host pointer to ram allocated with qemu_ram_alloc.
1406 With the exception of the softmmu code in this file, this should
1407   only be used for local memory (e.g. video ram) that the device owns
1408 and knows it isn't going to access beyond the end of the block.
1409
1410 It should not be used for general purpose DMA.
1411 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1412 */
1413void *qemu_get_ram_ptr(ram_addr_t addr)
1414{
1415 RAMBlock *block = qemu_get_ram_block(addr);
1416
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001417 if (xen_enabled()) {
1418 /* We need to check if the requested address is in the RAM
1419 * because we don't want to map the entire memory in QEMU.
1420 * In that case just map until the end of the page.
1421 */
1422 if (block->offset == 0) {
1423 return xen_map_cache(addr, 0, 0);
1424 } else if (block->host == NULL) {
1425 block->host =
1426 xen_map_cache(block->offset, block->length, 1);
1427 }
1428 }
1429 return block->host + (addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00001430}
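/*
 * Illustrative sketch (not part of the original file): a device that owns
 * its own RAM block may legitimately cache this pointer, e.g.
 *
 *     void *vram = qemu_get_ram_ptr(vga_vram_offset);
 *     memset(vram, 0, vga_vram_size);
 *
 * where vga_vram_offset and vga_vram_size are hypothetical values obtained
 * from qemu_ram_alloc().  Guest-visible DMA must instead go through
 * cpu_physical_memory_map()/cpu_physical_memory_rw(), which handle MMIO
 * and bounce buffering.
 */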
1431
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001432/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1433 * but takes a size argument */
Peter Maydellcb85f7a2013-07-08 09:44:04 +01001434static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001435{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001436 if (*size == 0) {
1437 return NULL;
1438 }
Jan Kiszka868bb332011-06-21 22:59:09 +02001439 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001440 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02001441 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001442 RAMBlock *block;
1443
Paolo Bonzinia3161032012-11-14 15:54:48 +01001444 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001445 if (addr - block->offset < block->length) {
1446 if (addr - block->offset + *size > block->length)
1447 *size = block->length - addr + block->offset;
1448 return block->host + (addr - block->offset);
1449 }
1450 }
1451
1452 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1453 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001454 }
1455}
1456
Paolo Bonzini7443b432013-06-03 12:44:02 +02001457/* Some of the softmmu routines need to translate from a host pointer
1458 (typically a TLB entry) back to a ram offset. */
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001459MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00001460{
pbrook94a6b542009-04-11 17:15:54 +00001461 RAMBlock *block;
1462 uint8_t *host = ptr;
1463
Jan Kiszka868bb332011-06-21 22:59:09 +02001464 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001465 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001466 return qemu_get_ram_block(*ram_addr)->mr;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001467 }
1468
Paolo Bonzini23887b72013-05-06 14:28:39 +02001469 block = ram_list.mru_block;
1470 if (block && block->host && host - block->host < block->length) {
1471 goto found;
1472 }
1473
Paolo Bonzinia3161032012-11-14 15:54:48 +01001474 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001475        /* This case happens when the block is not mapped. */
1476 if (block->host == NULL) {
1477 continue;
1478 }
Alex Williamsonf471a172010-06-11 11:11:42 -06001479 if (host - block->host < block->length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001480 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001481 }
pbrook94a6b542009-04-11 17:15:54 +00001482 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001483
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001484 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001485
1486found:
1487 *ram_addr = block->offset + (host - block->host);
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001488 return block->mr;
Marcelo Tosattie8902612010-10-11 15:31:19 -03001489}
Alex Williamsonf471a172010-06-11 11:11:42 -06001490
Avi Kivitya8170e52012-10-23 12:30:10 +02001491static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001492 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001493{
bellard3a7d9292005-08-21 09:26:42 +00001494 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001495 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00001496 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001497 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001498 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00001499 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001500 switch (size) {
1501 case 1:
1502 stb_p(qemu_get_ram_ptr(ram_addr), val);
1503 break;
1504 case 2:
1505 stw_p(qemu_get_ram_ptr(ram_addr), val);
1506 break;
1507 case 4:
1508 stl_p(qemu_get_ram_ptr(ram_addr), val);
1509 break;
1510 default:
1511 abort();
1512 }
bellardf23db162005-08-21 19:12:28 +00001513 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001514 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00001515 /* we remove the notdirty callback only if the code has been
1516 flushed */
Andreas Färber4917cf42013-05-27 05:17:50 +02001517 if (dirty_flags == 0xff) {
1518 CPUArchState *env = current_cpu->env_ptr;
1519 tlb_set_dirty(env, env->mem_io_vaddr);
1520 }
bellard1ccde1c2004-02-06 19:46:14 +00001521}
1522
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001523static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1524 unsigned size, bool is_write)
1525{
1526 return is_write;
1527}
1528
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001529static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001530 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001531 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001532 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00001533};
1534
pbrook0f459d12008-06-09 00:20:13 +00001535/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00001536static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00001537{
Andreas Färber4917cf42013-05-27 05:17:50 +02001538 CPUArchState *env = current_cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00001539 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00001540 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00001541 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00001542 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00001543
aliguori06d55cc2008-11-18 20:24:06 +00001544 if (env->watchpoint_hit) {
1545 /* We re-entered the check after replacing the TB. Now raise
1546         * the debug interrupt so that it will trigger after the
1547 * current instruction. */
Andreas Färberc3affe52013-01-18 15:03:43 +01001548 cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00001549 return;
1550 }
pbrook2e70f6e2008-06-29 01:03:05 +00001551 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00001552 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001553 if ((vaddr == (wp->vaddr & len_mask) ||
1554 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00001555 wp->flags |= BP_WATCHPOINT_HIT;
1556 if (!env->watchpoint_hit) {
1557 env->watchpoint_hit = wp;
Blue Swirl5a316522012-12-02 21:28:09 +00001558 tb_check_watchpoint(env);
aliguori6e140f22008-11-18 20:37:55 +00001559 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1560 env->exception_index = EXCP_DEBUG;
Max Filippov488d6572012-01-29 02:24:39 +04001561 cpu_loop_exit(env);
aliguori6e140f22008-11-18 20:37:55 +00001562 } else {
1563 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1564 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
Max Filippov488d6572012-01-29 02:24:39 +04001565 cpu_resume_from_signal(env, NULL);
aliguori6e140f22008-11-18 20:37:55 +00001566 }
aliguori06d55cc2008-11-18 20:24:06 +00001567 }
aliguori6e140f22008-11-18 20:37:55 +00001568 } else {
1569 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00001570 }
1571 }
1572}
1573
pbrook6658ffb2007-03-16 23:58:11 +00001574/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1575 so these check for a hit then pass through to the normal out-of-line
1576 phys routines. */
Avi Kivitya8170e52012-10-23 12:30:10 +02001577static uint64_t watch_mem_read(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001578 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001579{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001580 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1581 switch (size) {
1582 case 1: return ldub_phys(addr);
1583 case 2: return lduw_phys(addr);
1584 case 4: return ldl_phys(addr);
1585 default: abort();
1586 }
pbrook6658ffb2007-03-16 23:58:11 +00001587}
1588
Avi Kivitya8170e52012-10-23 12:30:10 +02001589static void watch_mem_write(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001590 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001591{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001592 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1593 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04001594 case 1:
1595 stb_phys(addr, val);
1596 break;
1597 case 2:
1598 stw_phys(addr, val);
1599 break;
1600 case 4:
1601 stl_phys(addr, val);
1602 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02001603 default: abort();
1604 }
pbrook6658ffb2007-03-16 23:58:11 +00001605}
1606
Avi Kivity1ec9b902012-01-02 12:47:48 +02001607static const MemoryRegionOps watch_mem_ops = {
1608 .read = watch_mem_read,
1609 .write = watch_mem_write,
1610 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00001611};
pbrook6658ffb2007-03-16 23:58:11 +00001612
Avi Kivitya8170e52012-10-23 12:30:10 +02001613static uint64_t subpage_read(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001614 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001615{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001616 subpage_t *subpage = opaque;
1617 uint8_t buf[4];
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001618
blueswir1db7b5422007-05-26 17:36:03 +00001619#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001620 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001621 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00001622#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001623 address_space_read(subpage->as, addr + subpage->base, buf, len);
1624 switch (len) {
1625 case 1:
1626 return ldub_p(buf);
1627 case 2:
1628 return lduw_p(buf);
1629 case 4:
1630 return ldl_p(buf);
1631 default:
1632 abort();
1633 }
blueswir1db7b5422007-05-26 17:36:03 +00001634}
1635
Avi Kivitya8170e52012-10-23 12:30:10 +02001636static void subpage_write(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001637 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001638{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001639 subpage_t *subpage = opaque;
1640 uint8_t buf[4];
1641
blueswir1db7b5422007-05-26 17:36:03 +00001642#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001643 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001644 " value %"PRIx64"\n",
1645 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00001646#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001647 switch (len) {
1648 case 1:
1649 stb_p(buf, value);
1650 break;
1651 case 2:
1652 stw_p(buf, value);
1653 break;
1654 case 4:
1655 stl_p(buf, value);
1656 break;
1657 default:
1658 abort();
1659 }
1660 address_space_write(subpage->as, addr + subpage->base, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00001661}
1662
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001663static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08001664 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001665{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001666 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001667#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001668 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001669 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001670#endif
1671
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001672 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08001673 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001674}
1675
Avi Kivity70c68e42012-01-02 12:32:48 +02001676static const MemoryRegionOps subpage_ops = {
1677 .read = subpage_read,
1678 .write = subpage_write,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001679 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02001680 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00001681};
1682
Anthony Liguoric227f092009-10-01 16:12:16 -05001683static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02001684 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00001685{
1686 int idx, eidx;
1687
1688 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1689 return -1;
1690 idx = SUBPAGE_IDX(start);
1691 eidx = SUBPAGE_IDX(end);
1692#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001693 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
1694 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00001695#endif
blueswir1db7b5422007-05-26 17:36:03 +00001696 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02001697 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00001698 }
1699
1700 return 0;
1701}
1702
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001703static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00001704{
Anthony Liguoric227f092009-10-01 16:12:16 -05001705 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00001706
Anthony Liguori7267c092011-08-20 22:09:37 -05001707 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00001708
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001709 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00001710 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001711 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Avi Kivity70c68e42012-01-02 12:32:48 +02001712 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02001713 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00001714#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001715 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1716 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00001717#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02001718 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00001719
1720 return mmio;
1721}
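/*
 * Note (added for clarity): a subpage is installed when a
 * MemoryRegionSection does not cover a whole target page, e.g. two small
 * device regions sharing one TARGET_PAGE_SIZE page.  subpage_read() and
 * subpage_write() simply re-dispatch through address_space_read()/
 * address_space_write() on subpage->as with subpage->base added back, so
 * the fine-grained section lookup happens in the normal dispatch path.
 */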
1722
Avi Kivity5312bd82012-02-12 18:32:55 +02001723static uint16_t dummy_section(MemoryRegion *mr)
1724{
1725 MemoryRegionSection section = {
1726 .mr = mr,
1727 .offset_within_address_space = 0,
1728 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001729 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02001730 };
1731
1732 return phys_section_add(&section);
1733}
1734
Avi Kivitya8170e52012-10-23 12:30:10 +02001735MemoryRegion *iotlb_to_region(hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02001736{
Paolo Bonzini0475d942013-05-29 12:28:21 +02001737 return address_space_memory.dispatch->sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02001738}
1739
Avi Kivitye9179ce2009-06-14 11:38:52 +03001740static void io_mem_init(void)
1741{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001742 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
1743 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001744 "unassigned", UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001745 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001746 "notdirty", UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001747 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001748 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03001749}
1750
Avi Kivityac1970f2012-10-03 16:22:53 +02001751static void mem_begin(MemoryListener *listener)
1752{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001753 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini00752702013-05-29 12:13:54 +02001754 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
1755
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02001756 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02001757 d->as = as;
1758 as->next_dispatch = d;
1759}
1760
1761static void mem_commit(MemoryListener *listener)
1762{
1763 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02001764 AddressSpaceDispatch *cur = as->dispatch;
1765 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02001766
Paolo Bonzini0475d942013-05-29 12:28:21 +02001767 next->nodes = next_map.nodes;
1768 next->sections = next_map.sections;
1769
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02001770 phys_page_compact_all(next, next_map.nodes_nb);
1771
Paolo Bonzini0475d942013-05-29 12:28:21 +02001772 as->dispatch = next;
1773 g_free(cur);
Avi Kivityac1970f2012-10-03 16:22:53 +02001774}
1775
Avi Kivity50c1e142012-02-08 21:36:02 +02001776static void core_begin(MemoryListener *listener)
1777{
Liu Ping Fanb41aac42013-05-29 11:09:17 +02001778 uint16_t n;
1779
Paolo Bonzini60926662013-05-29 12:30:26 +02001780 prev_map = g_new(PhysPageMap, 1);
1781 *prev_map = next_map;
1782
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001783 memset(&next_map, 0, sizeof(next_map));
Liu Ping Fanb41aac42013-05-29 11:09:17 +02001784 n = dummy_section(&io_mem_unassigned);
1785 assert(n == PHYS_SECTION_UNASSIGNED);
1786 n = dummy_section(&io_mem_notdirty);
1787 assert(n == PHYS_SECTION_NOTDIRTY);
1788 n = dummy_section(&io_mem_rom);
1789 assert(n == PHYS_SECTION_ROM);
1790 n = dummy_section(&io_mem_watch);
1791 assert(n == PHYS_SECTION_WATCH);
Avi Kivity50c1e142012-02-08 21:36:02 +02001792}
1793
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001794/* This listener's commit runs after the other AddressSpaceDispatch listeners'.
1795 * All AddressSpaceDispatch instances have switched to the next map.
1796 */
1797static void core_commit(MemoryListener *listener)
1798{
Paolo Bonzini60926662013-05-29 12:30:26 +02001799 phys_sections_free(prev_map);
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001800}
1801
Avi Kivity1d711482012-10-02 18:54:45 +02001802static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02001803{
Andreas Färber182735e2013-05-29 22:29:20 +02001804 CPUState *cpu;
Avi Kivity117712c2012-02-12 21:23:17 +02001805
1806 /* since each CPU stores ram addresses in its TLB cache, we must
1807 reset the modified entries */
1808 /* XXX: slow ! */
Andreas Färberbdc44642013-06-24 23:50:24 +02001809 CPU_FOREACH(cpu) {
Andreas Färber182735e2013-05-29 22:29:20 +02001810 CPUArchState *env = cpu->env_ptr;
1811
Avi Kivity117712c2012-02-12 21:23:17 +02001812 tlb_flush(env, 1);
1813 }
Avi Kivity50c1e142012-02-08 21:36:02 +02001814}
1815
Avi Kivity93632742012-02-08 16:54:16 +02001816static void core_log_global_start(MemoryListener *listener)
1817{
1818 cpu_physical_memory_set_dirty_tracking(1);
1819}
1820
1821static void core_log_global_stop(MemoryListener *listener)
1822{
1823 cpu_physical_memory_set_dirty_tracking(0);
1824}
1825
Avi Kivity93632742012-02-08 16:54:16 +02001826static MemoryListener core_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02001827 .begin = core_begin,
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001828 .commit = core_commit,
Avi Kivity93632742012-02-08 16:54:16 +02001829 .log_global_start = core_log_global_start,
1830 .log_global_stop = core_log_global_stop,
Avi Kivityac1970f2012-10-03 16:22:53 +02001831 .priority = 1,
Avi Kivity93632742012-02-08 16:54:16 +02001832};
1833
Avi Kivity1d711482012-10-02 18:54:45 +02001834static MemoryListener tcg_memory_listener = {
1835 .commit = tcg_commit,
1836};
1837
Avi Kivityac1970f2012-10-03 16:22:53 +02001838void address_space_init_dispatch(AddressSpace *as)
1839{
Paolo Bonzini00752702013-05-29 12:13:54 +02001840 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001841 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02001842 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02001843 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02001844 .region_add = mem_add,
1845 .region_nop = mem_add,
1846 .priority = 0,
1847 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001848 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02001849}
1850
Avi Kivity83f3c252012-10-07 12:59:55 +02001851void address_space_destroy_dispatch(AddressSpace *as)
1852{
1853 AddressSpaceDispatch *d = as->dispatch;
1854
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001855 memory_listener_unregister(&as->dispatch_listener);
Avi Kivity83f3c252012-10-07 12:59:55 +02001856 g_free(d);
1857 as->dispatch = NULL;
1858}
1859
Avi Kivity62152b82011-07-26 14:26:14 +03001860static void memory_map_init(void)
1861{
Anthony Liguori7267c092011-08-20 22:09:37 -05001862 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01001863
1864 assert(ADDR_SPACE_BITS <= 64);
1865
1866 memory_region_init(system_memory, NULL, "system",
1867 ADDR_SPACE_BITS == 64 ?
1868 UINT64_MAX : (0x1ULL << ADDR_SPACE_BITS));
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00001869 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03001870
Anthony Liguori7267c092011-08-20 22:09:37 -05001871 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02001872 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
1873 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00001874 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity93632742012-02-08 16:54:16 +02001875
Avi Kivityf6790af2012-10-02 20:13:51 +02001876 memory_listener_register(&core_memory_listener, &address_space_memory);
liguang26416892013-09-04 14:37:33 +08001877 if (tcg_enabled()) {
1878 memory_listener_register(&tcg_memory_listener, &address_space_memory);
1879 }
Avi Kivity62152b82011-07-26 14:26:14 +03001880}
1881
1882MemoryRegion *get_system_memory(void)
1883{
1884 return system_memory;
1885}
1886
Avi Kivity309cb472011-08-08 16:09:03 +03001887MemoryRegion *get_system_io(void)
1888{
1889 return system_io;
1890}
1891
pbrooke2eef172008-06-08 01:09:01 +00001892#endif /* !defined(CONFIG_USER_ONLY) */
1893
bellard13eb76e2004-01-24 15:23:36 +00001894/* physical memory access (slow version, mainly for debug) */
1895#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02001896int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00001897 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00001898{
1899 int l, flags;
1900 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00001901 void * p;
bellard13eb76e2004-01-24 15:23:36 +00001902
1903 while (len > 0) {
1904 page = addr & TARGET_PAGE_MASK;
1905 l = (page + TARGET_PAGE_SIZE) - addr;
1906 if (l > len)
1907 l = len;
1908 flags = page_get_flags(page);
1909 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00001910 return -1;
bellard13eb76e2004-01-24 15:23:36 +00001911 if (is_write) {
1912 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00001913 return -1;
bellard579a97f2007-11-11 14:26:47 +00001914 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001915 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00001916 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001917 memcpy(p, buf, l);
1918 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00001919 } else {
1920 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00001921 return -1;
bellard579a97f2007-11-11 14:26:47 +00001922 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001923 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00001924 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001925 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00001926 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00001927 }
1928 len -= l;
1929 buf += l;
1930 addr += l;
1931 }
Paul Brooka68fe892010-03-01 00:08:59 +00001932 return 0;
bellard13eb76e2004-01-24 15:23:36 +00001933}
bellard8df1cd02005-01-28 22:37:22 +00001934
bellard13eb76e2004-01-24 15:23:36 +00001935#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001936
Avi Kivitya8170e52012-10-23 12:30:10 +02001937static void invalidate_and_set_dirty(hwaddr addr,
1938 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001939{
1940 if (!cpu_physical_memory_is_dirty(addr)) {
1941 /* invalidate code */
1942 tb_invalidate_phys_page_range(addr, addr + length, 0);
1943 /* set dirty bit */
1944 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1945 }
Anthony PERARDe2269392012-10-03 13:49:22 +00001946 xen_modified_memory(addr, length);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001947}
1948
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02001949static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
1950{
1951 if (memory_region_is_ram(mr)) {
1952 return !(is_write && mr->readonly);
1953 }
1954 if (memory_region_is_romd(mr)) {
1955 return !is_write;
1956 }
1957
1958 return false;
1959}
1960
Richard Henderson23326162013-07-08 14:55:59 -07001961static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02001962{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02001963 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07001964
1965 /* Regions are assumed to support 1-4 byte accesses unless
1966 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07001967 if (access_size_max == 0) {
1968 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02001969 }
Richard Henderson23326162013-07-08 14:55:59 -07001970
1971 /* Bound the maximum access by the alignment of the address. */
1972 if (!mr->ops->impl.unaligned) {
1973 unsigned align_size_max = addr & -addr;
1974 if (align_size_max != 0 && align_size_max < access_size_max) {
1975 access_size_max = align_size_max;
1976 }
1977 }
1978
1979 /* Don't attempt accesses larger than the maximum. */
1980 if (l > access_size_max) {
1981 l = access_size_max;
1982 }
Paolo Bonzini098178f2013-07-29 14:27:39 +02001983 if (l & (l - 1)) {
1984 l = 1 << (qemu_fls(l) - 1);
1985 }
Richard Henderson23326162013-07-08 14:55:59 -07001986
1987 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02001988}
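/*
 * Worked example (editor's illustration): a 4-byte access at addr 0x1002
 * on a region without impl.unaligned has align_size_max = addr & -addr = 2,
 * so l is clamped to 2 and the caller (address_space_rw() below) issues
 * two 2-byte accesses rather than one misaligned 4-byte access.
 */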
1989
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001990bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02001991 int len, bool is_write)
bellard13eb76e2004-01-24 15:23:36 +00001992{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001993 hwaddr l;
bellard13eb76e2004-01-24 15:23:36 +00001994 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001995 uint64_t val;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001996 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001997 MemoryRegion *mr;
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001998 bool error = false;
ths3b46e622007-09-17 08:09:54 +00001999
bellard13eb76e2004-01-24 15:23:36 +00002000 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002001 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002002 mr = address_space_translate(as, addr, &addr1, &l, is_write);
ths3b46e622007-09-17 08:09:54 +00002003
bellard13eb76e2004-01-24 15:23:36 +00002004 if (is_write) {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002005 if (!memory_access_is_direct(mr, is_write)) {
2006 l = memory_access_size(mr, l, addr1);
Andreas Färber4917cf42013-05-27 05:17:50 +02002007 /* XXX: could force current_cpu to NULL to avoid
bellard6a00d602005-11-21 23:25:50 +00002008 potential bugs */
Richard Henderson23326162013-07-08 14:55:59 -07002009 switch (l) {
2010 case 8:
2011 /* 64 bit write access */
2012 val = ldq_p(buf);
2013 error |= io_mem_write(mr, addr1, val, 8);
2014 break;
2015 case 4:
bellard1c213d12005-09-03 10:49:04 +00002016 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002017 val = ldl_p(buf);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002018 error |= io_mem_write(mr, addr1, val, 4);
Richard Henderson23326162013-07-08 14:55:59 -07002019 break;
2020 case 2:
bellard1c213d12005-09-03 10:49:04 +00002021 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002022 val = lduw_p(buf);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002023 error |= io_mem_write(mr, addr1, val, 2);
Richard Henderson23326162013-07-08 14:55:59 -07002024 break;
2025 case 1:
bellard1c213d12005-09-03 10:49:04 +00002026 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002027 val = ldub_p(buf);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002028 error |= io_mem_write(mr, addr1, val, 1);
Richard Henderson23326162013-07-08 14:55:59 -07002029 break;
2030 default:
2031 abort();
bellard13eb76e2004-01-24 15:23:36 +00002032 }
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02002033 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002034 addr1 += memory_region_get_ram_addr(mr);
bellard13eb76e2004-01-24 15:23:36 +00002035 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002036 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00002037 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002038 invalidate_and_set_dirty(addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002039 }
2040 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002041 if (!memory_access_is_direct(mr, is_write)) {
bellard13eb76e2004-01-24 15:23:36 +00002042 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002043 l = memory_access_size(mr, l, addr1);
Richard Henderson23326162013-07-08 14:55:59 -07002044 switch (l) {
2045 case 8:
2046 /* 64 bit read access */
2047 error |= io_mem_read(mr, addr1, &val, 8);
2048 stq_p(buf, val);
2049 break;
2050 case 4:
bellard13eb76e2004-01-24 15:23:36 +00002051 /* 32 bit read access */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002052 error |= io_mem_read(mr, addr1, &val, 4);
bellardc27004e2005-01-03 23:35:10 +00002053 stl_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002054 break;
2055 case 2:
bellard13eb76e2004-01-24 15:23:36 +00002056 /* 16 bit read access */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002057 error |= io_mem_read(mr, addr1, &val, 2);
bellardc27004e2005-01-03 23:35:10 +00002058 stw_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002059 break;
2060 case 1:
bellard1c213d12005-09-03 10:49:04 +00002061 /* 8 bit read access */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002062 error |= io_mem_read(mr, addr1, &val, 1);
bellardc27004e2005-01-03 23:35:10 +00002063 stb_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002064 break;
2065 default:
2066 abort();
bellard13eb76e2004-01-24 15:23:36 +00002067 }
2068 } else {
2069 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002070 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
Avi Kivityf3705d52012-03-08 16:16:34 +02002071 memcpy(buf, ptr, l);
bellard13eb76e2004-01-24 15:23:36 +00002072 }
2073 }
2074 len -= l;
2075 buf += l;
2076 addr += l;
2077 }
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002078
2079 return error;
bellard13eb76e2004-01-24 15:23:36 +00002080}
bellard8df1cd02005-01-28 22:37:22 +00002081
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002082bool address_space_write(AddressSpace *as, hwaddr addr,
Avi Kivityac1970f2012-10-03 16:22:53 +02002083 const uint8_t *buf, int len)
2084{
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002085 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
Avi Kivityac1970f2012-10-03 16:22:53 +02002086}
2087
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002088bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002089{
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002090 return address_space_rw(as, addr, buf, len, false);
Avi Kivityac1970f2012-10-03 16:22:53 +02002091}
2092
2093
Avi Kivitya8170e52012-10-23 12:30:10 +02002094void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002095 int len, int is_write)
2096{
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002097 address_space_rw(&address_space_memory, addr, buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002098}
2099
bellardd0ecd2a2006-04-23 17:14:48 +00002100/* used for ROM loading : can write in RAM and ROM */
Avi Kivitya8170e52012-10-23 12:30:10 +02002101void cpu_physical_memory_write_rom(hwaddr addr,
bellardd0ecd2a2006-04-23 17:14:48 +00002102 const uint8_t *buf, int len)
2103{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002104 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002105 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002106 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002107 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002108
bellardd0ecd2a2006-04-23 17:14:48 +00002109 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002110 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002111 mr = address_space_translate(&address_space_memory,
2112 addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002113
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002114 if (!(memory_region_is_ram(mr) ||
2115 memory_region_is_romd(mr))) {
bellardd0ecd2a2006-04-23 17:14:48 +00002116 /* do nothing */
2117 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002118 addr1 += memory_region_get_ram_addr(mr);
bellardd0ecd2a2006-04-23 17:14:48 +00002119 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002120 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002121 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002122 invalidate_and_set_dirty(addr1, l);
bellardd0ecd2a2006-04-23 17:14:48 +00002123 }
2124 len -= l;
2125 buf += l;
2126 addr += l;
2127 }
2128}
2129
aliguori6d16c2f2009-01-22 16:59:11 +00002130typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002131 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002132 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002133 hwaddr addr;
2134 hwaddr len;
aliguori6d16c2f2009-01-22 16:59:11 +00002135} BounceBuffer;
2136
2137static BounceBuffer bounce;
2138
aliguoriba223c22009-01-22 16:59:16 +00002139typedef struct MapClient {
2140 void *opaque;
2141 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00002142 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002143} MapClient;
2144
Blue Swirl72cf2d42009-09-12 07:36:22 +00002145static QLIST_HEAD(map_client_list, MapClient) map_client_list
2146 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002147
2148void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2149{
Anthony Liguori7267c092011-08-20 22:09:37 -05002150 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00002151
2152 client->opaque = opaque;
2153 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002154 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00002155 return client;
2156}
2157
Blue Swirl8b9c99d2012-10-28 11:04:51 +00002158static void cpu_unregister_map_client(void *_client)
aliguoriba223c22009-01-22 16:59:16 +00002159{
2160 MapClient *client = (MapClient *)_client;
2161
Blue Swirl72cf2d42009-09-12 07:36:22 +00002162 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002163 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002164}
2165
2166static void cpu_notify_map_clients(void)
2167{
2168 MapClient *client;
2169
Blue Swirl72cf2d42009-09-12 07:36:22 +00002170 while (!QLIST_EMPTY(&map_client_list)) {
2171 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002172 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09002173 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00002174 }
2175}
2176
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002177bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2178{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002179 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002180 hwaddr l, xlat;
2181
2182 while (len > 0) {
2183 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002184 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2185 if (!memory_access_is_direct(mr, is_write)) {
2186 l = memory_access_size(mr, l, addr);
2187 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002188 return false;
2189 }
2190 }
2191
2192 len -= l;
2193 addr += l;
2194 }
2195 return true;
2196}
2197
aliguori6d16c2f2009-01-22 16:59:11 +00002198/* Map a physical memory region into a host virtual address.
2199 * May map a subset of the requested range, given by and returned in *plen.
2200 * May return NULL if resources needed to perform the mapping are exhausted.
2201 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002202 * Use cpu_register_map_client() to know when retrying the map operation is
2203 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002204 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002205void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002206 hwaddr addr,
2207 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002208 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002209{
Avi Kivitya8170e52012-10-23 12:30:10 +02002210 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002211 hwaddr done = 0;
2212 hwaddr l, xlat, base;
2213 MemoryRegion *mr, *this_mr;
2214 ram_addr_t raddr;
aliguori6d16c2f2009-01-22 16:59:11 +00002215
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002216 if (len == 0) {
2217 return NULL;
2218 }
aliguori6d16c2f2009-01-22 16:59:11 +00002219
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002220 l = len;
2221 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2222 if (!memory_access_is_direct(mr, is_write)) {
2223 if (bounce.buffer) {
2224 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002225 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002226 /* Avoid unbounded allocations */
2227 l = MIN(l, TARGET_PAGE_SIZE);
2228 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002229 bounce.addr = addr;
2230 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002231
2232 memory_region_ref(mr);
2233 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002234 if (!is_write) {
2235 address_space_read(as, addr, bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002236 }
aliguori6d16c2f2009-01-22 16:59:11 +00002237
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002238 *plen = l;
2239 return bounce.buffer;
2240 }
2241
2242 base = xlat;
2243 raddr = memory_region_get_ram_addr(mr);
2244
2245 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00002246 len -= l;
2247 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002248 done += l;
2249 if (len == 0) {
2250 break;
2251 }
2252
2253 l = len;
2254 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2255 if (this_mr != mr || xlat != base + done) {
2256 break;
2257 }
aliguori6d16c2f2009-01-22 16:59:11 +00002258 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002259
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002260 memory_region_ref(mr);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002261 *plen = done;
2262 return qemu_ram_ptr_length(raddr + base, plen);
aliguori6d16c2f2009-01-22 16:59:11 +00002263}
2264
Avi Kivityac1970f2012-10-03 16:22:53 +02002265/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002266 * Will also mark the memory as dirty if is_write == 1. access_len gives
2267 * the amount of memory that was actually read or written by the caller.
2268 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002269void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2270 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002271{
2272 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002273 MemoryRegion *mr;
2274 ram_addr_t addr1;
2275
2276 mr = qemu_ram_addr_from_host(buffer, &addr1);
2277 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00002278 if (is_write) {
aliguori6d16c2f2009-01-22 16:59:11 +00002279 while (access_len) {
2280 unsigned l;
2281 l = TARGET_PAGE_SIZE;
2282 if (l > access_len)
2283 l = access_len;
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002284 invalidate_and_set_dirty(addr1, l);
aliguori6d16c2f2009-01-22 16:59:11 +00002285 addr1 += l;
2286 access_len -= l;
2287 }
2288 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002289 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002290 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002291 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002292 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00002293 return;
2294 }
2295 if (is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002296 address_space_write(as, bounce.addr, bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002297 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002298 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002299 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002300 memory_region_unref(bounce.mr);
aliguoriba223c22009-01-22 16:59:16 +00002301 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002302}
bellardd0ecd2a2006-04-23 17:14:48 +00002303
Avi Kivitya8170e52012-10-23 12:30:10 +02002304void *cpu_physical_memory_map(hwaddr addr,
2305 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002306 int is_write)
2307{
2308 return address_space_map(&address_space_memory, addr, plen, is_write);
2309}
2310
Avi Kivitya8170e52012-10-23 12:30:10 +02002311void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2312 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002313{
2314 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2315}
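/*
 * Illustrative usage sketch (not part of the original source): a device
 * model writing "len" bytes from a hypothetical host buffer "data" could
 * use the map/unmap pair like this:
 *
 *     hwaddr plen = len;
 *     void *host = cpu_physical_memory_map(guest_addr, &plen, 1);
 *     if (host) {
 *         memcpy(host, data, plen);
 *         cpu_physical_memory_unmap(host, plen, 1, plen);
 *     } else {
 *         // bounce buffer busy: retry later via cpu_register_map_client()
 *         // or fall back to the slow path:
 *         cpu_physical_memory_rw(guest_addr, (uint8_t *)data, len, 1);
 *     }
 *
 * plen may come back smaller than len, so real callers loop until the
 * whole transfer is done.
 */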
2316
bellard8df1cd02005-01-28 22:37:22 +00002317/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002318static inline uint32_t ldl_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002319 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002320{
bellard8df1cd02005-01-28 22:37:22 +00002321 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002322 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002323 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002324 hwaddr l = 4;
2325 hwaddr addr1;
bellard8df1cd02005-01-28 22:37:22 +00002326
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002327 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2328 false);
2329 if (l < 4 || !memory_access_is_direct(mr, false)) {
bellard8df1cd02005-01-28 22:37:22 +00002330 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002331 io_mem_read(mr, addr1, &val, 4);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002332#if defined(TARGET_WORDS_BIGENDIAN)
2333 if (endian == DEVICE_LITTLE_ENDIAN) {
2334 val = bswap32(val);
2335 }
2336#else
2337 if (endian == DEVICE_BIG_ENDIAN) {
2338 val = bswap32(val);
2339 }
2340#endif
bellard8df1cd02005-01-28 22:37:22 +00002341 } else {
2342 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002343 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002344 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002345 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002346 switch (endian) {
2347 case DEVICE_LITTLE_ENDIAN:
2348 val = ldl_le_p(ptr);
2349 break;
2350 case DEVICE_BIG_ENDIAN:
2351 val = ldl_be_p(ptr);
2352 break;
2353 default:
2354 val = ldl_p(ptr);
2355 break;
2356 }
bellard8df1cd02005-01-28 22:37:22 +00002357 }
2358 return val;
2359}
2360
Avi Kivitya8170e52012-10-23 12:30:10 +02002361uint32_t ldl_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002362{
2363 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2364}
2365
Avi Kivitya8170e52012-10-23 12:30:10 +02002366uint32_t ldl_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002367{
2368 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2369}
2370
Avi Kivitya8170e52012-10-23 12:30:10 +02002371uint32_t ldl_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002372{
2373 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2374}
2375
bellard84b7b8e2005-11-28 21:19:04 +00002376/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002377static inline uint64_t ldq_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002378 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00002379{
bellard84b7b8e2005-11-28 21:19:04 +00002380 uint8_t *ptr;
2381 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002382 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002383 hwaddr l = 8;
2384 hwaddr addr1;
bellard84b7b8e2005-11-28 21:19:04 +00002385
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002386 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2387 false);
2388 if (l < 8 || !memory_access_is_direct(mr, false)) {
bellard84b7b8e2005-11-28 21:19:04 +00002389 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002390 io_mem_read(mr, addr1, &val, 8);
Paolo Bonzini968a5622013-05-24 17:58:37 +02002391#if defined(TARGET_WORDS_BIGENDIAN)
2392 if (endian == DEVICE_LITTLE_ENDIAN) {
2393 val = bswap64(val);
2394 }
2395#else
2396 if (endian == DEVICE_BIG_ENDIAN) {
2397 val = bswap64(val);
2398 }
2399#endif
bellard84b7b8e2005-11-28 21:19:04 +00002400 } else {
2401 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002402 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002403 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002404 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002405 switch (endian) {
2406 case DEVICE_LITTLE_ENDIAN:
2407 val = ldq_le_p(ptr);
2408 break;
2409 case DEVICE_BIG_ENDIAN:
2410 val = ldq_be_p(ptr);
2411 break;
2412 default:
2413 val = ldq_p(ptr);
2414 break;
2415 }
bellard84b7b8e2005-11-28 21:19:04 +00002416 }
2417 return val;
2418}
2419
Avi Kivitya8170e52012-10-23 12:30:10 +02002420uint64_t ldq_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002421{
2422 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2423}
2424
Avi Kivitya8170e52012-10-23 12:30:10 +02002425uint64_t ldq_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002426{
2427 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2428}
2429
Avi Kivitya8170e52012-10-23 12:30:10 +02002430uint64_t ldq_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002431{
2432 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2433}
2434
bellardaab33092005-10-30 20:48:42 +00002435/* XXX: optimize */
Avi Kivitya8170e52012-10-23 12:30:10 +02002436uint32_t ldub_phys(hwaddr addr)
bellardaab33092005-10-30 20:48:42 +00002437{
2438 uint8_t val;
2439 cpu_physical_memory_read(addr, &val, 1);
2440 return val;
2441}
2442
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002443/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002444static inline uint32_t lduw_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002445 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00002446{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002447 uint8_t *ptr;
2448 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002449 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002450 hwaddr l = 2;
2451 hwaddr addr1;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002452
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002453 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2454 false);
2455 if (l < 2 || !memory_access_is_direct(mr, false)) {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002456 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002457 io_mem_read(mr, addr1, &val, 2);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002458#if defined(TARGET_WORDS_BIGENDIAN)
2459 if (endian == DEVICE_LITTLE_ENDIAN) {
2460 val = bswap16(val);
2461 }
2462#else
2463 if (endian == DEVICE_BIG_ENDIAN) {
2464 val = bswap16(val);
2465 }
2466#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002467 } else {
2468 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002469 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002470 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002471 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002472 switch (endian) {
2473 case DEVICE_LITTLE_ENDIAN:
2474 val = lduw_le_p(ptr);
2475 break;
2476 case DEVICE_BIG_ENDIAN:
2477 val = lduw_be_p(ptr);
2478 break;
2479 default:
2480 val = lduw_p(ptr);
2481 break;
2482 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002483 }
2484 return val;
bellardaab33092005-10-30 20:48:42 +00002485}
2486
Avi Kivitya8170e52012-10-23 12:30:10 +02002487uint32_t lduw_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002488{
2489 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2490}
2491
Avi Kivitya8170e52012-10-23 12:30:10 +02002492uint32_t lduw_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002493{
2494 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2495}
2496
Avi Kivitya8170e52012-10-23 12:30:10 +02002497uint32_t lduw_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002498{
2499 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2500}
2501
bellard8df1cd02005-01-28 22:37:22 +00002502/* warning: addr must be aligned. The ram page is not marked as dirty
2503 and the code inside is not invalidated. It is useful if the dirty
2504 bits are used to track modified PTEs */
Avi Kivitya8170e52012-10-23 12:30:10 +02002505void stl_phys_notdirty(hwaddr addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00002506{
bellard8df1cd02005-01-28 22:37:22 +00002507 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002508 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002509 hwaddr l = 4;
2510 hwaddr addr1;
bellard8df1cd02005-01-28 22:37:22 +00002511
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002512 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2513 true);
2514 if (l < 4 || !memory_access_is_direct(mr, true)) {
2515 io_mem_write(mr, addr1, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00002516 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002517 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00002518 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00002519 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00002520
2521 if (unlikely(in_migration)) {
2522 if (!cpu_physical_memory_is_dirty(addr1)) {
2523 /* invalidate code */
2524 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2525 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002526 cpu_physical_memory_set_dirty_flags(
2527 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori74576192008-10-06 14:02:03 +00002528 }
2529 }
bellard8df1cd02005-01-28 22:37:22 +00002530 }
2531}
2532
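/*
 * Editor's note (illustrative sketch, not part of the original file):
 * stl_phys_notdirty() suits a software page-table walker that needs to set
 * accessed/dirty bits in a guest PTE: the store must reach guest RAM without
 * the usual dirty marking and code invalidation.  The PTE layout and the
 * 0x20 "accessed" bit below are invented for the example.
 */
#if 0
static void example_set_pte_accessed(hwaddr pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & 0x20)) {                /* hypothetical "accessed" bit */
        stl_phys_notdirty(pte_addr, pte | 0x20);
    }
}
#endif
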
2533/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002534static inline void stl_phys_internal(hwaddr addr, uint32_t val,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002535 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002536{
bellard8df1cd02005-01-28 22:37:22 +00002537 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002538 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002539 hwaddr l = 4;
2540 hwaddr addr1;
bellard8df1cd02005-01-28 22:37:22 +00002541
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002542 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2543 true);
2544 if (l < 4 || !memory_access_is_direct(mr, true)) {
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002545#if defined(TARGET_WORDS_BIGENDIAN)
2546 if (endian == DEVICE_LITTLE_ENDIAN) {
2547 val = bswap32(val);
2548 }
2549#else
2550 if (endian == DEVICE_BIG_ENDIAN) {
2551 val = bswap32(val);
2552 }
2553#endif
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002554 io_mem_write(mr, addr1, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00002555 } else {
bellard8df1cd02005-01-28 22:37:22 +00002556 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002557 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00002558 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002559 switch (endian) {
2560 case DEVICE_LITTLE_ENDIAN:
2561 stl_le_p(ptr, val);
2562 break;
2563 case DEVICE_BIG_ENDIAN:
2564 stl_be_p(ptr, val);
2565 break;
2566 default:
2567 stl_p(ptr, val);
2568 break;
2569 }
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002570 invalidate_and_set_dirty(addr1, 4);
bellard8df1cd02005-01-28 22:37:22 +00002571 }
2572}
2573
Avi Kivitya8170e52012-10-23 12:30:10 +02002574void stl_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002575{
2576 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2577}
2578
Avi Kivitya8170e52012-10-23 12:30:10 +02002579void stl_le_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002580{
2581 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2582}
2583
Avi Kivitya8170e52012-10-23 12:30:10 +02002584void stl_be_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002585{
2586 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2587}
2588
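/*
 * Editor's note (illustrative sketch, not part of the original file): the
 * fixed-endian load/store helpers (ldl_le_phys(), stl_le_phys(), ...) give
 * device models a byte order that does not depend on the guest CPU's
 * endianness.  Below, a hypothetical descriptor with two little-endian
 * 32-bit fields (buffer address at offset 0, length at offset 4) is read
 * and its length updated; the descriptor layout is invented for the example.
 */
#if 0
static uint32_t example_update_le_descriptor(hwaddr desc, uint32_t new_len)
{
    uint32_t buf_addr = ldl_le_phys(desc);     /* always little-endian */

    stl_le_phys(desc + 4, new_len);            /* likewise for the store */
    return buf_addr;
}
#endif
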
bellardaab33092005-10-30 20:48:42 +00002589/* XXX: optimize */
Avi Kivitya8170e52012-10-23 12:30:10 +02002590void stb_phys(hwaddr addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00002591{
2592 uint8_t v = val;
2593 cpu_physical_memory_write(addr, &v, 1);
2594}
2595
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002596/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002597static inline void stw_phys_internal(hwaddr addr, uint32_t val,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002598 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00002599{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002600 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002601 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002602 hwaddr l = 2;
2603 hwaddr addr1;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002604
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002605 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2606 true);
2607 if (l < 2 || !memory_access_is_direct(mr, true)) {
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002608#if defined(TARGET_WORDS_BIGENDIAN)
2609 if (endian == DEVICE_LITTLE_ENDIAN) {
2610 val = bswap16(val);
2611 }
2612#else
2613 if (endian == DEVICE_BIG_ENDIAN) {
2614 val = bswap16(val);
2615 }
2616#endif
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002617 io_mem_write(mr, addr1, val, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002618 } else {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002619 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002620 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002621 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002622 switch (endian) {
2623 case DEVICE_LITTLE_ENDIAN:
2624 stw_le_p(ptr, val);
2625 break;
2626 case DEVICE_BIG_ENDIAN:
2627 stw_be_p(ptr, val);
2628 break;
2629 default:
2630 stw_p(ptr, val);
2631 break;
2632 }
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002633 invalidate_and_set_dirty(addr1, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002634 }
bellardaab33092005-10-30 20:48:42 +00002635}
2636
Avi Kivitya8170e52012-10-23 12:30:10 +02002637void stw_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002638{
2639 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2640}
2641
Avi Kivitya8170e52012-10-23 12:30:10 +02002642void stw_le_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002643{
2644 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2645}
2646
Avi Kivitya8170e52012-10-23 12:30:10 +02002647void stw_be_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002648{
2649 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2650}
2651
bellardaab33092005-10-30 20:48:42 +00002652/* XXX: optimize */
Avi Kivitya8170e52012-10-23 12:30:10 +02002653void stq_phys(hwaddr addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00002654{
2655 val = tswap64(val);
Stefan Weil71d2b722011-03-26 21:06:56 +01002656 cpu_physical_memory_write(addr, &val, 8);
bellardaab33092005-10-30 20:48:42 +00002657}
2658
Avi Kivitya8170e52012-10-23 12:30:10 +02002659void stq_le_phys(hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002660{
2661 val = cpu_to_le64(val);
2662 cpu_physical_memory_write(addr, &val, 8);
2663}
2664
Avi Kivitya8170e52012-10-23 12:30:10 +02002665void stq_be_phys(hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002666{
2667 val = cpu_to_be64(val);
2668 cpu_physical_memory_write(addr, &val, 8);
2669}
2670
aliguori5e2972f2009-03-28 17:51:36 +00002671/* virtual memory access for debug (includes writing to ROM) */
Andreas Färberf17ec442013-06-29 19:40:58 +02002672int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00002673 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002674{
2675 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02002676 hwaddr phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00002677 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00002678
2679 while (len > 0) {
2680 page = addr & TARGET_PAGE_MASK;
Andreas Färberf17ec442013-06-29 19:40:58 +02002681 phys_addr = cpu_get_phys_page_debug(cpu, page);
bellard13eb76e2004-01-24 15:23:36 +00002682 /* if no physical page mapped, return an error */
2683 if (phys_addr == -1)
2684 return -1;
2685 l = (page + TARGET_PAGE_SIZE) - addr;
2686 if (l > len)
2687 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00002688 phys_addr += (addr & ~TARGET_PAGE_MASK);
aliguori5e2972f2009-03-28 17:51:36 +00002689 if (is_write)
2690 cpu_physical_memory_write_rom(phys_addr, buf, l);
2691 else
aliguori5e2972f2009-03-28 17:51:36 +00002692 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
bellard13eb76e2004-01-24 15:23:36 +00002693 len -= l;
2694 buf += l;
2695 addr += l;
2696 }
2697 return 0;
2698}
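
/*
 * Editor's note (illustrative sketch, not part of the original file):
 * cpu_memory_rw_debug() takes guest *virtual* addresses; each page is
 * translated with the CPU's current MMU state and then accessed physically
 * (for writes, ROM is written too), which is what a debugger stub needs.
 * The "peek" helper below is invented for the example.
 */
#if 0
static bool example_debug_peek_u32(CPUState *cpu, target_ulong vaddr,
                                   uint32_t *value)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(cpu, vaddr, buf, sizeof(buf), 0) < 0) {
        return false;               /* no physical page mapped here */
    }
    *value = ldl_p(buf);            /* interpret in guest-native order */
    return true;
}
#endif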
Paul Brooka68fe892010-03-01 00:08:59 +00002699#endif
bellard13eb76e2004-01-24 15:23:36 +00002700
Blue Swirl8e4a4242013-01-06 18:30:17 +00002701#if !defined(CONFIG_USER_ONLY)
2702
2703/*
2704 * A helper function for the _utterly broken_ virtio device model to find out if
2705 * it's running on a big endian machine. Don't do this at home kids!
2706 */
2707bool virtio_is_big_endian(void);
2708bool virtio_is_big_endian(void)
2709{
2710#if defined(TARGET_WORDS_BIGENDIAN)
2711 return true;
2712#else
2713 return false;
2714#endif
2715}
2716
2717#endif
2718
Wen Congyang76f35532012-05-07 12:04:18 +08002719#ifndef CONFIG_USER_ONLY
Avi Kivitya8170e52012-10-23 12:30:10 +02002720bool cpu_physical_memory_is_io(hwaddr phys_addr)
Wen Congyang76f35532012-05-07 12:04:18 +08002721{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002722 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002723 hwaddr l = 1;
Wen Congyang76f35532012-05-07 12:04:18 +08002724
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002725 mr = address_space_translate(&address_space_memory,
2726 phys_addr, &phys_addr, &l, false);
Wen Congyang76f35532012-05-07 12:04:18 +08002727
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002728 return !(memory_region_is_ram(mr) ||
2729 memory_region_is_romd(mr));
Wen Congyang76f35532012-05-07 12:04:18 +08002730}
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04002731
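/*
 * Editor's note (illustrative sketch, not part of the original file):
 * cpu_physical_memory_is_io() reports whether a guest-physical address
 * resolves to something other than directly accessible RAM/ROM, i.e. MMIO
 * or unassigned space.  A dump routine might use it to avoid reading device
 * registers, whose reads can have side effects; the helper below is invented
 * for the example.
 */
#if 0
static void example_dump_page(hwaddr addr, uint8_t *out)
{
    if (cpu_physical_memory_is_io(addr)) {
        memset(out, 0, TARGET_PAGE_SIZE);   /* leave device MMIO alone */
        return;
    }
    cpu_physical_memory_read(addr, out, TARGET_PAGE_SIZE);
}
#endif
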
2732void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2733{
2734 RAMBlock *block;
2735
2736 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
2737 func(block->host, block->offset, block->length, opaque);
2738 }
2739}
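
/*
 * Editor's note (illustrative sketch, not part of the original file):
 * qemu_ram_foreach_block() calls the given function once per RAM block with
 * the block's host pointer, its offset in the ram_addr_t space and its
 * length, as the loop above shows.  The helpers below, which simply add up
 * the guest's RAM, are invented for the example.
 */
#if 0
static void example_count_ram(void *host_addr, ram_addr_t offset,
                              ram_addr_t length, void *opaque)
{
    uint64_t *total = opaque;

    *total += length;
}

static uint64_t example_total_ram(void)
{
    uint64_t total = 0;

    qemu_ram_foreach_block(example_count_ram, &total);
    return total;
}
#endif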
Peter Maydellec3f8c92013-06-27 20:53:38 +01002740#endif