/*
 *  Virtual page mapping
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

#include "qemu/range.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

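/* Allocate a fresh node from the map's node array and initialise every entry
 * as an empty one-level skip.  Callers must have reserved space with
 * phys_map_node_reserve() first. */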
static uint32_t phys_map_node_alloc(PhysPageMap *map)
{
    unsigned i;
    uint32_t ret;

    ret = map->nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);
    for (i = 0; i < P_L2_SIZE; ++i) {
        map->nodes[ret][i].skip = 1;
        map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

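/* Recursively fill the radix tree so that the page range [*index, *index + *nb)
 * points at the section numbered 'leaf', descending one level per call and
 * allocating intermediate nodes on demand. */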
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map);
        p = map->nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < P_L2_SIZE; i++) {
                p[i].skip = 0;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = map->nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

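/* Walk the radix tree for 'addr', honouring the per-entry skip counts, and
 * return the MemoryRegionSection that covers it, or the unassigned section
 * when the address is not mapped. */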
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

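/* Look up the section covering 'addr' in the dispatch tree, optionally
 * resolving a subpage container down to the sub-section that actually backs
 * the address. */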
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}

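/* Translate an address-space offset into a target MemoryRegion and offset,
 * following any IOMMUs on the way and clamping *plen to what the final
 * region can serve. */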
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    *plen = len;
    *xlat = addr;
    return mr;
}

MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)

{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
    }

    breakpoint_invalidate(ENV_GET_CPU(env), pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(ENV_GET_CPU(env), breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
#endif
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    CPUState *cpu = ENV_GET_CPU(env);
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
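/* Return the RAMBlock that contains guest ram offset 'addr', checking the
 * most-recently-used block first; aborts if the offset belongs to no block. */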
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here. */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    RAMBlock *block;
    ram_addr_t start1;

    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)block->host + (start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - address_space_memory.dispatch->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t))
{
    phys_mem_alloc = alloc;
}

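/* Append a copy of 'section' to the map's section table, taking a reference
 * on its MemoryRegion, and return the index used to refer to it from the
 * radix tree. */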
static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

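/* Register a section that does not cover whole target pages: create (or reuse)
 * the subpage container for that page and hook the section into it. */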
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}


static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

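/* MemoryListener callback used while building the dispatch tree: split an
 * incoming section into an unaligned head and tail that go through the
 * subpage machinery and an aligned middle registered as full pages. */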
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static sigjmp_buf sigjump;

static void sigbus_handler(int signal)
{
    siglongjmp(sigjump, 1);
}

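/* Back a RAMBlock with a file on a hugetlbfs mount: create a temporary file
 * under 'path', size it, mmap it, and, when mem_prealloc is set, touch every
 * page up front (allocation failures are caught via SIGBUS and abort startup). */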
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }

    if (mem_prealloc) {
        int ret, i;
        struct sigaction act, oldact;
        sigset_t set, oldset;

        memset(&act, 0, sizeof(act));
        act.sa_handler = &sigbus_handler;
        act.sa_flags = 0;

        ret = sigaction(SIGBUS, &act, &oldact);
        if (ret) {
            perror("file_ram_alloc: failed to install signal handler");
            exit(1);
        }

        /* unblock SIGBUS */
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        pthread_sigmask(SIG_UNBLOCK, &set, &oldset);

        if (sigsetjmp(sigjump, 1)) {
            fprintf(stderr, "file_ram_alloc: failed to preallocate pages\n");
            exit(1);
        }

        /* MAP_POPULATE silently ignores failures */
        for (i = 0; i < (memory/hpagesize)-1; i++) {
            memset(area + (hpagesize*i), 0, 1);
        }

        ret = sigaction(SIGBUS, &oldact, NULL);
        if (ret) {
            perror("file_ram_alloc: failed to reinstall signal handler");
            exit(1);
        }

        pthread_sigmask(SIG_SETMASK, &oldset, NULL);
    }

    block->fd = fd;
    return area;
}
#else
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    fprintf(stderr, "-mem-path not supported on this host\n");
    exit(1);
}
#endif

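/* Pick an offset for a new RAMBlock of 'size' bytes by scanning the existing
 * blocks for the smallest gap that can hold it. */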
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QTAILQ_EMPTY(&ram_list.blocks))
        return 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!qemu_opt_get_bool(qemu_get_machine_opts(),
                           "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
        }
    }
}

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* This assumes the iothread lock is taken here too. */
    qemu_mutex_lock_ramlist();
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    qemu_mutex_unlock_ramlist();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *block, *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->fd = -1;

    /* This assumes the iothread lock is taken here too. */
    qemu_mutex_lock_ramlist();
    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else if (xen_enabled()) {
        if (mem_path) {
            fprintf(stderr, "-mem-path not supported with Xen\n");
            exit(1);
        }
        xen_ram_alloc(new_block->offset, size, mr);
    } else {
        if (mem_path) {
            if (phys_mem_alloc != qemu_anon_ram_alloc) {
                /*
                 * file_ram_alloc() needs to allocate just like
                 * phys_mem_alloc, but we haven't bothered to provide
                 * a hook there.
                 */
                fprintf(stderr,
                        "-mem-path not supported with this accelerator\n");
                exit(1);
            }
            new_block->host = file_ram_alloc(new_block, size, mem_path);
Markus Armbruster0628c182013-07-31 15:11:06 +02001244 }
1245 if (!new_block->host) {
Markus Armbruster91138032013-07-31 15:11:08 +02001246 new_block->host = phys_mem_alloc(size);
Markus Armbruster39228252013-07-31 15:11:11 +02001247 if (!new_block->host) {
1248 fprintf(stderr, "Cannot set up guest memory '%s': %s\n",
1249 new_block->mr->name, strerror(errno));
1250 exit(1);
1251 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001252 memory_try_enable_merging(new_block->host, size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001253 }
1254 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001255 new_block->length = size;
1256
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001257 /* Keep the list sorted from biggest to smallest block. */
1258 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1259 if (block->length < new_block->length) {
1260 break;
1261 }
1262 }
1263 if (block) {
1264 QTAILQ_INSERT_BEFORE(block, new_block, next);
1265 } else {
1266 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1267 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001268 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001269
Umesh Deshpandef798b072011-08-18 11:41:17 -07001270 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001271 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001272
Anthony Liguori7267c092011-08-20 22:09:37 -05001273 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
Cam Macdonell84b89d72010-07-26 18:10:57 -06001274 last_ram_offset() >> TARGET_PAGE_BITS);
Igor Mitsyanko5fda0432012-08-10 18:45:11 +04001275 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
1276 0, size >> TARGET_PAGE_BITS);
Juan Quintela1720aee2012-06-22 13:14:17 +02001277 cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001278
Jason Baronddb97f12012-08-02 15:44:16 -04001279 qemu_ram_setup_dump(new_block->host, size);
Luiz Capitulinoad0b5322012-10-05 16:47:57 -03001280 qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
Andrea Arcangeli3e469db2013-07-25 12:11:15 +02001281 qemu_madvise(new_block->host, size, QEMU_MADV_DONTFORK);
Jason Baronddb97f12012-08-02 15:44:16 -04001282
Cam Macdonell84b89d72010-07-26 18:10:57 -06001283 if (kvm_enabled())
1284 kvm_setup_guest_memory(new_block->host, size);
1285
1286 return new_block->offset;
1287}
1288
Avi Kivityc5705a72011-12-20 15:59:12 +02001289ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
pbrook94a6b542009-04-11 17:15:54 +00001290{
Avi Kivityc5705a72011-12-20 15:59:12 +02001291 return qemu_ram_alloc_from_ptr(size, NULL, mr);
pbrook94a6b542009-04-11 17:15:54 +00001292}
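/* Illustrative sketch only -- the names and sizes below are hypothetical.
 * A board model typically allocates guest RAM through a MemoryRegion and
 * then labels the block so migration can identify it:
 *
 *     MemoryRegion *mr = g_new0(MemoryRegion, 1);
 *     memory_region_init(mr, NULL, "example.ram", 64 * 1024 * 1024);
 *     ram_addr_t offset = qemu_ram_alloc(64 * 1024 * 1024, mr);
 *     qemu_ram_set_idstr(offset, "example.ram", NULL);
 *
 * Most callers reach this code through memory_region_init_ram(), which
 * wraps the allocation.
 */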
bellarde9a1ab12007-02-08 23:08:38 +00001293
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001294void qemu_ram_free_from_ptr(ram_addr_t addr)
1295{
1296 RAMBlock *block;
1297
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001298 /* This assumes the iothread lock is taken here too. */
1299 qemu_mutex_lock_ramlist();
Paolo Bonzinia3161032012-11-14 15:54:48 +01001300 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001301 if (addr == block->offset) {
Paolo Bonzinia3161032012-11-14 15:54:48 +01001302 QTAILQ_REMOVE(&ram_list.blocks, block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001303 ram_list.mru_block = NULL;
Umesh Deshpandef798b072011-08-18 11:41:17 -07001304 ram_list.version++;
Anthony Liguori7267c092011-08-20 22:09:37 -05001305 g_free(block);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001306 break;
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001307 }
1308 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001309 qemu_mutex_unlock_ramlist();
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001310}
1311
Anthony Liguoric227f092009-10-01 16:12:16 -05001312void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00001313{
Alex Williamson04b16652010-07-02 11:13:17 -06001314 RAMBlock *block;
1315
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001316 /* This assumes the iothread lock is taken here too. */
1317 qemu_mutex_lock_ramlist();
Paolo Bonzinia3161032012-11-14 15:54:48 +01001318 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001319 if (addr == block->offset) {
Paolo Bonzinia3161032012-11-14 15:54:48 +01001320 QTAILQ_REMOVE(&ram_list.blocks, block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001321 ram_list.mru_block = NULL;
Umesh Deshpandef798b072011-08-18 11:41:17 -07001322 ram_list.version++;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001323 if (block->flags & RAM_PREALLOC_MASK) {
1324 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001325 } else if (xen_enabled()) {
1326 xen_invalidate_map_cache_entry(block->host);
Stefan Weil089f3f72013-09-18 07:48:15 +02001327#ifndef _WIN32
Markus Armbruster3435f392013-07-31 15:11:07 +02001328 } else if (block->fd >= 0) {
1329 munmap(block->host, block->length);
1330 close(block->fd);
Stefan Weil089f3f72013-09-18 07:48:15 +02001331#endif
Alex Williamson04b16652010-07-02 11:13:17 -06001332 } else {
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001333 qemu_anon_ram_free(block->host, block->length);
Alex Williamson04b16652010-07-02 11:13:17 -06001334 }
Anthony Liguori7267c092011-08-20 22:09:37 -05001335 g_free(block);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001336 break;
Alex Williamson04b16652010-07-02 11:13:17 -06001337 }
1338 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001339 qemu_mutex_unlock_ramlist();
Alex Williamson04b16652010-07-02 11:13:17 -06001340
bellarde9a1ab12007-02-08 23:08:38 +00001341}
1342
Huang Yingcd19cfa2011-03-02 08:56:19 +01001343#ifndef _WIN32
1344void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1345{
1346 RAMBlock *block;
1347 ram_addr_t offset;
1348 int flags;
1349 void *area, *vaddr;
1350
Paolo Bonzinia3161032012-11-14 15:54:48 +01001351 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001352 offset = addr - block->offset;
1353 if (offset < block->length) {
1354 vaddr = block->host + offset;
1355 if (block->flags & RAM_PREALLOC_MASK) {
1356 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001357 } else if (xen_enabled()) {
1358 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001359 } else {
1360 flags = MAP_FIXED;
1361 munmap(vaddr, length);
Markus Armbruster3435f392013-07-31 15:11:07 +02001362 if (block->fd >= 0) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001363#ifdef MAP_POPULATE
Markus Armbruster3435f392013-07-31 15:11:07 +02001364 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1365 MAP_PRIVATE;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001366#else
Markus Armbruster3435f392013-07-31 15:11:07 +02001367 flags |= MAP_PRIVATE;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001368#endif
Markus Armbruster3435f392013-07-31 15:11:07 +02001369 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1370 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001371 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001372 /*
1373 * Remap needs to match alloc. Accelerators that
1374 * set phys_mem_alloc never remap. If they did,
1375 * we'd need a remap hook here.
1376 */
1377 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1378
Huang Yingcd19cfa2011-03-02 08:56:19 +01001379 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1380 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1381 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001382 }
1383 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001384 fprintf(stderr, "Could not remap addr: "
1385 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001386 length, addr);
1387 exit(1);
1388 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001389 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001390 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001391 }
1392 return;
1393 }
1394 }
1395}
1396#endif /* !_WIN32 */
1397
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001398/* Return a host pointer to ram allocated with qemu_ram_alloc.
1399 With the exception of the softmmu code in this file, this should
1400 only be used for local memory (e.g. video ram) that the device owns,
1401 and knows it isn't going to access beyond the end of the block.
1402
1403 It should not be used for general purpose DMA.
1404 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1405 */
1406void *qemu_get_ram_ptr(ram_addr_t addr)
1407{
1408 RAMBlock *block = qemu_get_ram_block(addr);
1409
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001410 if (xen_enabled()) {
1411 /* We need to check if the requested address is in the RAM
1412 * because we don't want to map the entire memory in QEMU.
1413 * In that case just map until the end of the page.
1414 */
1415 if (block->offset == 0) {
1416 return xen_map_cache(addr, 0, 0);
1417 } else if (block->host == NULL) {
1418 block->host =
1419 xen_map_cache(block->offset, block->length, 1);
1420 }
1421 }
1422 return block->host + (addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00001423}
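/* Illustrative use (hypothetical device; "vram_offset" and "vram_size" are
 * placeholders): a device that owns its RAM block may cache the host
 * pointer for fast local access:
 *
 *     void *vram = qemu_get_ram_ptr(vram_offset);
 *     memset(vram, 0, vram_size);
 *
 * The pointer is only valid within that block and must not be used for
 * general DMA; see cpu_physical_memory_map() for that.
 */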
1424
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001425/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1426 * but takes a size argument */
Peter Maydellcb85f7a2013-07-08 09:44:04 +01001427static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001428{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001429 if (*size == 0) {
1430 return NULL;
1431 }
Jan Kiszka868bb332011-06-21 22:59:09 +02001432 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001433 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02001434 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001435 RAMBlock *block;
1436
Paolo Bonzinia3161032012-11-14 15:54:48 +01001437 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001438 if (addr - block->offset < block->length) {
1439 if (addr - block->offset + *size > block->length)
1440 *size = block->length - addr + block->offset;
1441 return block->host + (addr - block->offset);
1442 }
1443 }
1444
1445 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1446 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001447 }
1448}
1449
Paolo Bonzini7443b432013-06-03 12:44:02 +02001450/* Some of the softmmu routines need to translate from a host pointer
1451 (typically a TLB entry) back to a ram offset. */
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001452MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00001453{
pbrook94a6b542009-04-11 17:15:54 +00001454 RAMBlock *block;
1455 uint8_t *host = ptr;
1456
Jan Kiszka868bb332011-06-21 22:59:09 +02001457 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001458 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001459 return qemu_get_ram_block(*ram_addr)->mr;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001460 }
1461
Paolo Bonzini23887b72013-05-06 14:28:39 +02001462 block = ram_list.mru_block;
1463 if (block && block->host && host - block->host < block->length) {
1464 goto found;
1465 }
1466
Paolo Bonzinia3161032012-11-14 15:54:48 +01001467 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001468 /* This case appears when the block is not mapped. */
1469 if (block->host == NULL) {
1470 continue;
1471 }
Alex Williamsonf471a172010-06-11 11:11:42 -06001472 if (host - block->host < block->length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001473 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001474 }
pbrook94a6b542009-04-11 17:15:54 +00001475 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001476
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001477 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001478
1479found:
1480 *ram_addr = block->offset + (host - block->host);
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001481 return block->mr;
Marcelo Tosattie8902612010-10-11 15:31:19 -03001482}
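/* Illustrative round trip (sketch only; "host_ptr" is a placeholder):
 *
 *     ram_addr_t ram_addr;
 *     MemoryRegion *mr = qemu_ram_addr_from_host(host_ptr, &ram_addr);
 *     if (mr) {
 *         assert(qemu_get_ram_ptr(ram_addr) == host_ptr);   (non-Xen case)
 *     }
 *
 * A NULL return means the pointer does not belong to any RAM block.
 */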
Alex Williamsonf471a172010-06-11 11:11:42 -06001483
Avi Kivitya8170e52012-10-23 12:30:10 +02001484static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001485 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001486{
bellard3a7d9292005-08-21 09:26:42 +00001487 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001488 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00001489 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001490 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001491 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00001492 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001493 switch (size) {
1494 case 1:
1495 stb_p(qemu_get_ram_ptr(ram_addr), val);
1496 break;
1497 case 2:
1498 stw_p(qemu_get_ram_ptr(ram_addr), val);
1499 break;
1500 case 4:
1501 stl_p(qemu_get_ram_ptr(ram_addr), val);
1502 break;
1503 default:
1504 abort();
1505 }
bellardf23db162005-08-21 19:12:28 +00001506 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001507 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00001508 /* we remove the notdirty callback only if the code has been
1509 flushed */
Andreas Färber4917cf42013-05-27 05:17:50 +02001510 if (dirty_flags == 0xff) {
1511 CPUArchState *env = current_cpu->env_ptr;
1512 tlb_set_dirty(env, env->mem_io_vaddr);
1513 }
bellard1ccde1c2004-02-06 19:46:14 +00001514}
1515
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001516static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1517 unsigned size, bool is_write)
1518{
1519 return is_write;
1520}
1521
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001522static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001523 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001524 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001525 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00001526};
1527
pbrook0f459d12008-06-09 00:20:13 +00001528/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00001529static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00001530{
Andreas Färber4917cf42013-05-27 05:17:50 +02001531 CPUArchState *env = current_cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00001532 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00001533 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00001534 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00001535 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00001536
aliguori06d55cc2008-11-18 20:24:06 +00001537 if (env->watchpoint_hit) {
1538 /* We re-entered the check after replacing the TB. Now raise
1539 * the debug interrupt so that it will trigger after the
1540 * current instruction. */
Andreas Färberc3affe52013-01-18 15:03:43 +01001541 cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00001542 return;
1543 }
pbrook2e70f6e2008-06-29 01:03:05 +00001544 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00001545 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001546 if ((vaddr == (wp->vaddr & len_mask) ||
1547 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00001548 wp->flags |= BP_WATCHPOINT_HIT;
1549 if (!env->watchpoint_hit) {
1550 env->watchpoint_hit = wp;
Blue Swirl5a316522012-12-02 21:28:09 +00001551 tb_check_watchpoint(env);
aliguori6e140f22008-11-18 20:37:55 +00001552 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1553 env->exception_index = EXCP_DEBUG;
Max Filippov488d6572012-01-29 02:24:39 +04001554 cpu_loop_exit(env);
aliguori6e140f22008-11-18 20:37:55 +00001555 } else {
1556 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1557 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
Max Filippov488d6572012-01-29 02:24:39 +04001558 cpu_resume_from_signal(env, NULL);
aliguori6e140f22008-11-18 20:37:55 +00001559 }
aliguori06d55cc2008-11-18 20:24:06 +00001560 }
aliguori6e140f22008-11-18 20:37:55 +00001561 } else {
1562 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00001563 }
1564 }
1565}
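/* Worked example (illustrative): a 4-byte watchpoint at vaddr 0x1000 is
 * stored with len_mask ~3.  A 2-byte access at 0x1002 reaches this function
 * with len_mask ~1, so the first test fails (0x1002 != (0x1000 & ~1)) but
 * the second succeeds ((0x1002 & ~3) == 0x1000), and the watchpoint fires
 * provided the access direction matches wp->flags.
 */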
1566
pbrook6658ffb2007-03-16 23:58:11 +00001567/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1568 so these check for a hit then pass through to the normal out-of-line
1569 phys routines. */
Avi Kivitya8170e52012-10-23 12:30:10 +02001570static uint64_t watch_mem_read(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001571 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001572{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001573 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1574 switch (size) {
1575 case 1: return ldub_phys(addr);
1576 case 2: return lduw_phys(addr);
1577 case 4: return ldl_phys(addr);
1578 default: abort();
1579 }
pbrook6658ffb2007-03-16 23:58:11 +00001580}
1581
Avi Kivitya8170e52012-10-23 12:30:10 +02001582static void watch_mem_write(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001583 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001584{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001585 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1586 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04001587 case 1:
1588 stb_phys(addr, val);
1589 break;
1590 case 2:
1591 stw_phys(addr, val);
1592 break;
1593 case 4:
1594 stl_phys(addr, val);
1595 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02001596 default: abort();
1597 }
pbrook6658ffb2007-03-16 23:58:11 +00001598}
1599
Avi Kivity1ec9b902012-01-02 12:47:48 +02001600static const MemoryRegionOps watch_mem_ops = {
1601 .read = watch_mem_read,
1602 .write = watch_mem_write,
1603 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00001604};
pbrook6658ffb2007-03-16 23:58:11 +00001605
Avi Kivitya8170e52012-10-23 12:30:10 +02001606static uint64_t subpage_read(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001607 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001608{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001609 subpage_t *subpage = opaque;
1610 uint8_t buf[4];
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001611
blueswir1db7b5422007-05-26 17:36:03 +00001612#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001613 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001614 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00001615#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001616 address_space_read(subpage->as, addr + subpage->base, buf, len);
1617 switch (len) {
1618 case 1:
1619 return ldub_p(buf);
1620 case 2:
1621 return lduw_p(buf);
1622 case 4:
1623 return ldl_p(buf);
1624 default:
1625 abort();
1626 }
blueswir1db7b5422007-05-26 17:36:03 +00001627}
1628
Avi Kivitya8170e52012-10-23 12:30:10 +02001629static void subpage_write(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001630 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001631{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001632 subpage_t *subpage = opaque;
1633 uint8_t buf[4];
1634
blueswir1db7b5422007-05-26 17:36:03 +00001635#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001636 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001637 " value %"PRIx64"\n",
1638 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00001639#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001640 switch (len) {
1641 case 1:
1642 stb_p(buf, value);
1643 break;
1644 case 2:
1645 stw_p(buf, value);
1646 break;
1647 case 4:
1648 stl_p(buf, value);
1649 break;
1650 default:
1651 abort();
1652 }
1653 address_space_write(subpage->as, addr + subpage->base, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00001654}
1655
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001656static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08001657 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001658{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001659 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001660#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001661 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001662 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001663#endif
1664
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001665 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08001666 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001667}
1668
Avi Kivity70c68e42012-01-02 12:32:48 +02001669static const MemoryRegionOps subpage_ops = {
1670 .read = subpage_read,
1671 .write = subpage_write,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001672 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02001673 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00001674};
1675
Anthony Liguoric227f092009-10-01 16:12:16 -05001676static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02001677 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00001678{
1679 int idx, eidx;
1680
1681 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1682 return -1;
1683 idx = SUBPAGE_IDX(start);
1684 eidx = SUBPAGE_IDX(end);
1685#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001686 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
1687 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00001688#endif
blueswir1db7b5422007-05-26 17:36:03 +00001689 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02001690 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00001691 }
1692
1693 return 0;
1694}
1695
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001696static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00001697{
Anthony Liguoric227f092009-10-01 16:12:16 -05001698 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00001699
Anthony Liguori7267c092011-08-20 22:09:37 -05001700 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00001701
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001702 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00001703 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001704 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Avi Kivity70c68e42012-01-02 12:32:48 +02001705 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02001706 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00001707#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001708 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1709 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00001710#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02001711 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00001712
1713 return mmio;
1714}
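/* Illustrative example (not part of the original file): if a page at base
 * 0x10000 contains a 16-byte MMIO region starting at offset 0x100, the page
 * is covered by one subpage_t whose sub_section[] entries for bytes
 * 0x100-0x10f point at that region's section, while the remaining entries
 * keep whatever section covers the rest of the page (initially
 * PHYS_SECTION_UNASSIGNED).  Accesses to the page are then re-dispatched by
 * subpage_read()/subpage_write() through address_space_read()/write() at
 * subpage->base + addr.
 */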
1715
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001716static uint16_t dummy_section(PhysPageMap *map, MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02001717{
1718 MemoryRegionSection section = {
1719 .mr = mr,
1720 .offset_within_address_space = 0,
1721 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001722 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02001723 };
1724
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001725 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02001726}
1727
Avi Kivitya8170e52012-10-23 12:30:10 +02001728MemoryRegion *iotlb_to_region(hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02001729{
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001730 return address_space_memory.dispatch->map.sections[
1731 index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02001732}
1733
Avi Kivitye9179ce2009-06-14 11:38:52 +03001734static void io_mem_init(void)
1735{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001736 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
1737 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001738 "unassigned", UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001739 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001740 "notdirty", UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001741 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001742 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03001743}
1744
Avi Kivityac1970f2012-10-03 16:22:53 +02001745static void mem_begin(MemoryListener *listener)
1746{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001747 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001748 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
1749 uint16_t n;
1750
1751 n = dummy_section(&d->map, &io_mem_unassigned);
1752 assert(n == PHYS_SECTION_UNASSIGNED);
1753 n = dummy_section(&d->map, &io_mem_notdirty);
1754 assert(n == PHYS_SECTION_NOTDIRTY);
1755 n = dummy_section(&d->map, &io_mem_rom);
1756 assert(n == PHYS_SECTION_ROM);
1757 n = dummy_section(&d->map, &io_mem_watch);
1758 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02001759
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02001760 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02001761 d->as = as;
1762 as->next_dispatch = d;
1763}
1764
1765static void mem_commit(MemoryListener *listener)
1766{
1767 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02001768 AddressSpaceDispatch *cur = as->dispatch;
1769 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02001770
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001771 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02001772
Paolo Bonzini0475d942013-05-29 12:28:21 +02001773 as->dispatch = next;
Avi Kivityac1970f2012-10-03 16:22:53 +02001774
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001775 if (cur) {
1776 phys_sections_free(&cur->map);
1777 g_free(cur);
1778 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001779}
1780
Avi Kivity1d711482012-10-02 18:54:45 +02001781static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02001782{
Andreas Färber182735e2013-05-29 22:29:20 +02001783 CPUState *cpu;
Avi Kivity117712c2012-02-12 21:23:17 +02001784
1785 /* since each CPU stores ram addresses in its TLB cache, we must
1786 reset the modified entries */
1787 /* XXX: slow ! */
Andreas Färberbdc44642013-06-24 23:50:24 +02001788 CPU_FOREACH(cpu) {
Andreas Färber182735e2013-05-29 22:29:20 +02001789 CPUArchState *env = cpu->env_ptr;
1790
Avi Kivity117712c2012-02-12 21:23:17 +02001791 tlb_flush(env, 1);
1792 }
Avi Kivity50c1e142012-02-08 21:36:02 +02001793}
1794
Avi Kivity93632742012-02-08 16:54:16 +02001795static void core_log_global_start(MemoryListener *listener)
1796{
1797 cpu_physical_memory_set_dirty_tracking(1);
1798}
1799
1800static void core_log_global_stop(MemoryListener *listener)
1801{
1802 cpu_physical_memory_set_dirty_tracking(0);
1803}
1804
Avi Kivity93632742012-02-08 16:54:16 +02001805static MemoryListener core_memory_listener = {
Avi Kivity93632742012-02-08 16:54:16 +02001806 .log_global_start = core_log_global_start,
1807 .log_global_stop = core_log_global_stop,
Avi Kivityac1970f2012-10-03 16:22:53 +02001808 .priority = 1,
Avi Kivity93632742012-02-08 16:54:16 +02001809};
1810
Avi Kivity1d711482012-10-02 18:54:45 +02001811static MemoryListener tcg_memory_listener = {
1812 .commit = tcg_commit,
1813};
1814
Avi Kivityac1970f2012-10-03 16:22:53 +02001815void address_space_init_dispatch(AddressSpace *as)
1816{
Paolo Bonzini00752702013-05-29 12:13:54 +02001817 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001818 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02001819 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02001820 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02001821 .region_add = mem_add,
1822 .region_nop = mem_add,
1823 .priority = 0,
1824 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001825 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02001826}
1827
Avi Kivity83f3c252012-10-07 12:59:55 +02001828void address_space_destroy_dispatch(AddressSpace *as)
1829{
1830 AddressSpaceDispatch *d = as->dispatch;
1831
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001832 memory_listener_unregister(&as->dispatch_listener);
Avi Kivity83f3c252012-10-07 12:59:55 +02001833 g_free(d);
1834 as->dispatch = NULL;
1835}
1836
Avi Kivity62152b82011-07-26 14:26:14 +03001837static void memory_map_init(void)
1838{
Anthony Liguori7267c092011-08-20 22:09:37 -05001839 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01001840
Paolo Bonzini57271d62013-11-07 17:14:37 +01001841 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00001842 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03001843
Anthony Liguori7267c092011-08-20 22:09:37 -05001844 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02001845 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
1846 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00001847 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity93632742012-02-08 16:54:16 +02001848
Avi Kivityf6790af2012-10-02 20:13:51 +02001849 memory_listener_register(&core_memory_listener, &address_space_memory);
liguang26416892013-09-04 14:37:33 +08001850 if (tcg_enabled()) {
1851 memory_listener_register(&tcg_memory_listener, &address_space_memory);
1852 }
Avi Kivity62152b82011-07-26 14:26:14 +03001853}
1854
1855MemoryRegion *get_system_memory(void)
1856{
1857 return system_memory;
1858}
1859
Avi Kivity309cb472011-08-08 16:09:03 +03001860MemoryRegion *get_system_io(void)
1861{
1862 return system_io;
1863}
1864
pbrooke2eef172008-06-08 01:09:01 +00001865#endif /* !defined(CONFIG_USER_ONLY) */
1866
bellard13eb76e2004-01-24 15:23:36 +00001867/* physical memory access (slow version, mainly for debug) */
1868#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02001869int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00001870 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00001871{
1872 int l, flags;
1873 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00001874 void * p;
bellard13eb76e2004-01-24 15:23:36 +00001875
1876 while (len > 0) {
1877 page = addr & TARGET_PAGE_MASK;
1878 l = (page + TARGET_PAGE_SIZE) - addr;
1879 if (l > len)
1880 l = len;
1881 flags = page_get_flags(page);
1882 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00001883 return -1;
bellard13eb76e2004-01-24 15:23:36 +00001884 if (is_write) {
1885 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00001886 return -1;
bellard579a97f2007-11-11 14:26:47 +00001887 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001888 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00001889 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001890 memcpy(p, buf, l);
1891 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00001892 } else {
1893 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00001894 return -1;
bellard579a97f2007-11-11 14:26:47 +00001895 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001896 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00001897 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001898 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00001899 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00001900 }
1901 len -= l;
1902 buf += l;
1903 addr += l;
1904 }
Paul Brooka68fe892010-03-01 00:08:59 +00001905 return 0;
bellard13eb76e2004-01-24 15:23:36 +00001906}
bellard8df1cd02005-01-28 22:37:22 +00001907
bellard13eb76e2004-01-24 15:23:36 +00001908#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001909
Avi Kivitya8170e52012-10-23 12:30:10 +02001910static void invalidate_and_set_dirty(hwaddr addr,
1911 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001912{
1913 if (!cpu_physical_memory_is_dirty(addr)) {
1914 /* invalidate code */
1915 tb_invalidate_phys_page_range(addr, addr + length, 0);
1916 /* set dirty bit */
1917 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1918 }
Anthony PERARDe2269392012-10-03 13:49:22 +00001919 xen_modified_memory(addr, length);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001920}
1921
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02001922static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
1923{
1924 if (memory_region_is_ram(mr)) {
1925 return !(is_write && mr->readonly);
1926 }
1927 if (memory_region_is_romd(mr)) {
1928 return !is_write;
1929 }
1930
1931 return false;
1932}
1933
Richard Henderson23326162013-07-08 14:55:59 -07001934static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02001935{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02001936 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07001937
1938 /* Regions are assumed to support 1-4 byte accesses unless
1939 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07001940 if (access_size_max == 0) {
1941 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02001942 }
Richard Henderson23326162013-07-08 14:55:59 -07001943
1944 /* Bound the maximum access by the alignment of the address. */
1945 if (!mr->ops->impl.unaligned) {
1946 unsigned align_size_max = addr & -addr;
1947 if (align_size_max != 0 && align_size_max < access_size_max) {
1948 access_size_max = align_size_max;
1949 }
1950 }
1951
1952 /* Don't attempt accesses larger than the maximum. */
1953 if (l > access_size_max) {
1954 l = access_size_max;
1955 }
Paolo Bonzini098178f2013-07-29 14:27:39 +02001956 if (l & (l - 1)) {
1957 l = 1 << (qemu_fls(l) - 1);
1958 }
Richard Henderson23326162013-07-08 14:55:59 -07001959
1960 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02001961}
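/* Worked example (illustrative): a region whose ops set no
 * valid.max_access_size defaults to access_size_max = 4.  An 8-byte access
 * at addr 0x1002 on a region without impl.unaligned is further bounded by
 * the address alignment (0x1002 & -0x1002 == 2), so l becomes 2, which is
 * already a power of two; memory_access_size() returns 2 and the caller
 * splits the access into smaller pieces.
 */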
1962
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001963bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02001964 int len, bool is_write)
bellard13eb76e2004-01-24 15:23:36 +00001965{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001966 hwaddr l;
bellard13eb76e2004-01-24 15:23:36 +00001967 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001968 uint64_t val;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001969 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001970 MemoryRegion *mr;
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001971 bool error = false;
ths3b46e622007-09-17 08:09:54 +00001972
bellard13eb76e2004-01-24 15:23:36 +00001973 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001974 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001975 mr = address_space_translate(as, addr, &addr1, &l, is_write);
ths3b46e622007-09-17 08:09:54 +00001976
bellard13eb76e2004-01-24 15:23:36 +00001977 if (is_write) {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001978 if (!memory_access_is_direct(mr, is_write)) {
1979 l = memory_access_size(mr, l, addr1);
Andreas Färber4917cf42013-05-27 05:17:50 +02001980 /* XXX: could force current_cpu to NULL to avoid
bellard6a00d602005-11-21 23:25:50 +00001981 potential bugs */
Richard Henderson23326162013-07-08 14:55:59 -07001982 switch (l) {
1983 case 8:
1984 /* 64 bit write access */
1985 val = ldq_p(buf);
1986 error |= io_mem_write(mr, addr1, val, 8);
1987 break;
1988 case 4:
bellard1c213d12005-09-03 10:49:04 +00001989 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001990 val = ldl_p(buf);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001991 error |= io_mem_write(mr, addr1, val, 4);
Richard Henderson23326162013-07-08 14:55:59 -07001992 break;
1993 case 2:
bellard1c213d12005-09-03 10:49:04 +00001994 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00001995 val = lduw_p(buf);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001996 error |= io_mem_write(mr, addr1, val, 2);
Richard Henderson23326162013-07-08 14:55:59 -07001997 break;
1998 case 1:
bellard1c213d12005-09-03 10:49:04 +00001999 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002000 val = ldub_p(buf);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002001 error |= io_mem_write(mr, addr1, val, 1);
Richard Henderson23326162013-07-08 14:55:59 -07002002 break;
2003 default:
2004 abort();
bellard13eb76e2004-01-24 15:23:36 +00002005 }
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02002006 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002007 addr1 += memory_region_get_ram_addr(mr);
bellard13eb76e2004-01-24 15:23:36 +00002008 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002009 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00002010 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002011 invalidate_and_set_dirty(addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002012 }
2013 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002014 if (!memory_access_is_direct(mr, is_write)) {
bellard13eb76e2004-01-24 15:23:36 +00002015 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002016 l = memory_access_size(mr, l, addr1);
Richard Henderson23326162013-07-08 14:55:59 -07002017 switch (l) {
2018 case 8:
2019 /* 64 bit read access */
2020 error |= io_mem_read(mr, addr1, &val, 8);
2021 stq_p(buf, val);
2022 break;
2023 case 4:
bellard13eb76e2004-01-24 15:23:36 +00002024 /* 32 bit read access */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002025 error |= io_mem_read(mr, addr1, &val, 4);
bellardc27004e2005-01-03 23:35:10 +00002026 stl_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002027 break;
2028 case 2:
bellard13eb76e2004-01-24 15:23:36 +00002029 /* 16 bit read access */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002030 error |= io_mem_read(mr, addr1, &val, 2);
bellardc27004e2005-01-03 23:35:10 +00002031 stw_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002032 break;
2033 case 1:
bellard1c213d12005-09-03 10:49:04 +00002034 /* 8 bit read access */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002035 error |= io_mem_read(mr, addr1, &val, 1);
bellardc27004e2005-01-03 23:35:10 +00002036 stb_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002037 break;
2038 default:
2039 abort();
bellard13eb76e2004-01-24 15:23:36 +00002040 }
2041 } else {
2042 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002043 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
Avi Kivityf3705d52012-03-08 16:16:34 +02002044 memcpy(buf, ptr, l);
bellard13eb76e2004-01-24 15:23:36 +00002045 }
2046 }
2047 len -= l;
2048 buf += l;
2049 addr += l;
2050 }
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002051
2052 return error;
bellard13eb76e2004-01-24 15:23:36 +00002053}
bellard8df1cd02005-01-28 22:37:22 +00002054
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002055bool address_space_write(AddressSpace *as, hwaddr addr,
Avi Kivityac1970f2012-10-03 16:22:53 +02002056 const uint8_t *buf, int len)
2057{
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002058 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
Avi Kivityac1970f2012-10-03 16:22:53 +02002059}
2060
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002061bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002062{
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002063 return address_space_rw(as, addr, buf, len, false);
Avi Kivityac1970f2012-10-03 16:22:53 +02002064}
2065
2066
Avi Kivitya8170e52012-10-23 12:30:10 +02002067void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002068 int len, int is_write)
2069{
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002070 address_space_rw(&address_space_memory, addr, buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002071}
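/* Illustrative use (sketch only; "gpa" is a placeholder guest physical
 * address): copying a small buffer into guest memory and reading it back
 * through the slow path:
 *
 *     uint8_t buf[4] = { 1, 2, 3, 4 };
 *     cpu_physical_memory_rw(gpa, buf, sizeof(buf), 1);    (write)
 *     cpu_physical_memory_rw(gpa, buf, sizeof(buf), 0);    (read back)
 */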
2072
bellardd0ecd2a2006-04-23 17:14:48 +00002073/* used for ROM loading : can write in RAM and ROM */
Avi Kivitya8170e52012-10-23 12:30:10 +02002074void cpu_physical_memory_write_rom(hwaddr addr,
bellardd0ecd2a2006-04-23 17:14:48 +00002075 const uint8_t *buf, int len)
2076{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002077 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002078 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002079 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002080 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002081
bellardd0ecd2a2006-04-23 17:14:48 +00002082 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002083 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002084 mr = address_space_translate(&address_space_memory,
2085 addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002086
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002087 if (!(memory_region_is_ram(mr) ||
2088 memory_region_is_romd(mr))) {
bellardd0ecd2a2006-04-23 17:14:48 +00002089 /* do nothing */
2090 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002091 addr1 += memory_region_get_ram_addr(mr);
bellardd0ecd2a2006-04-23 17:14:48 +00002092 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002093 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002094 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002095 invalidate_and_set_dirty(addr1, l);
bellardd0ecd2a2006-04-23 17:14:48 +00002096 }
2097 len -= l;
2098 buf += l;
2099 addr += l;
2100 }
2101}
2102
aliguori6d16c2f2009-01-22 16:59:11 +00002103typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002104 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002105 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002106 hwaddr addr;
2107 hwaddr len;
aliguori6d16c2f2009-01-22 16:59:11 +00002108} BounceBuffer;
2109
2110static BounceBuffer bounce;
2111
aliguoriba223c22009-01-22 16:59:16 +00002112typedef struct MapClient {
2113 void *opaque;
2114 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00002115 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002116} MapClient;
2117
Blue Swirl72cf2d42009-09-12 07:36:22 +00002118static QLIST_HEAD(map_client_list, MapClient) map_client_list
2119 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002120
2121void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2122{
Anthony Liguori7267c092011-08-20 22:09:37 -05002123 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00002124
2125 client->opaque = opaque;
2126 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002127 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00002128 return client;
2129}
2130
Blue Swirl8b9c99d2012-10-28 11:04:51 +00002131static void cpu_unregister_map_client(void *_client)
aliguoriba223c22009-01-22 16:59:16 +00002132{
2133 MapClient *client = (MapClient *)_client;
2134
Blue Swirl72cf2d42009-09-12 07:36:22 +00002135 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002136 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002137}
2138
2139static void cpu_notify_map_clients(void)
2140{
2141 MapClient *client;
2142
Blue Swirl72cf2d42009-09-12 07:36:22 +00002143 while (!QLIST_EMPTY(&map_client_list)) {
2144 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002145 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09002146 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00002147 }
2148}
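/* Illustrative sketch (hypothetical names): a caller whose map attempt
 * failed because the bounce buffer was busy can ask to be retried once it
 * is released:
 *
 *     static void retry_dma(void *opaque)
 *     {
 *         MyDMAState *s = opaque;              (hypothetical device state)
 *         start_dma_transfer(s);               (hypothetical retry hook)
 *     }
 *     ...
 *     cpu_register_map_client(s, retry_dma);
 *
 * The client is unregistered automatically after its callback has run.
 */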
2149
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002150bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2151{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002152 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002153 hwaddr l, xlat;
2154
2155 while (len > 0) {
2156 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002157 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2158 if (!memory_access_is_direct(mr, is_write)) {
2159 l = memory_access_size(mr, l, addr);
2160 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002161 return false;
2162 }
2163 }
2164
2165 len -= l;
2166 addr += l;
2167 }
2168 return true;
2169}
2170
aliguori6d16c2f2009-01-22 16:59:11 +00002171/* Map a physical memory region into a host virtual address.
2172 * May map a subset of the requested range, given by and returned in *plen.
2173 * May return NULL if resources needed to perform the mapping are exhausted.
2174 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002175 * Use cpu_register_map_client() to know when retrying the map operation is
2176 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002177 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002178void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002179 hwaddr addr,
2180 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002181 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002182{
Avi Kivitya8170e52012-10-23 12:30:10 +02002183 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002184 hwaddr done = 0;
2185 hwaddr l, xlat, base;
2186 MemoryRegion *mr, *this_mr;
2187 ram_addr_t raddr;
aliguori6d16c2f2009-01-22 16:59:11 +00002188
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002189 if (len == 0) {
2190 return NULL;
2191 }
aliguori6d16c2f2009-01-22 16:59:11 +00002192
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002193 l = len;
2194 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2195 if (!memory_access_is_direct(mr, is_write)) {
2196 if (bounce.buffer) {
2197 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002198 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002199 /* Avoid unbounded allocations */
2200 l = MIN(l, TARGET_PAGE_SIZE);
2201 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002202 bounce.addr = addr;
2203 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002204
2205 memory_region_ref(mr);
2206 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002207 if (!is_write) {
2208 address_space_read(as, addr, bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002209 }
aliguori6d16c2f2009-01-22 16:59:11 +00002210
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002211 *plen = l;
2212 return bounce.buffer;
2213 }
2214
2215 base = xlat;
2216 raddr = memory_region_get_ram_addr(mr);
2217
2218 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00002219 len -= l;
2220 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002221 done += l;
2222 if (len == 0) {
2223 break;
2224 }
2225
2226 l = len;
2227 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2228 if (this_mr != mr || xlat != base + done) {
2229 break;
2230 }
aliguori6d16c2f2009-01-22 16:59:11 +00002231 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002232
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002233 memory_region_ref(mr);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002234 *plen = done;
2235 return qemu_ram_ptr_length(raddr + base, plen);
aliguori6d16c2f2009-01-22 16:59:11 +00002236}
2237
Avi Kivityac1970f2012-10-03 16:22:53 +02002238/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002239 * Will also mark the memory as dirty if is_write == 1. access_len gives
2240 * the amount of memory that was actually read or written by the caller.
2241 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002242void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2243 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002244{
2245 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002246 MemoryRegion *mr;
2247 ram_addr_t addr1;
2248
2249 mr = qemu_ram_addr_from_host(buffer, &addr1);
2250 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00002251 if (is_write) {
aliguori6d16c2f2009-01-22 16:59:11 +00002252 while (access_len) {
2253 unsigned l;
2254 l = TARGET_PAGE_SIZE;
2255 if (l > access_len)
2256 l = access_len;
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002257 invalidate_and_set_dirty(addr1, l);
aliguori6d16c2f2009-01-22 16:59:11 +00002258 addr1 += l;
2259 access_len -= l;
2260 }
2261 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002262 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002263 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002264 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002265 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00002266 return;
2267 }
2268 if (is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002269 address_space_write(as, bounce.addr, bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002270 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002271 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002272 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002273 memory_region_unref(bounce.mr);
aliguoriba223c22009-01-22 16:59:16 +00002274 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002275}
bellardd0ecd2a2006-04-23 17:14:48 +00002276
Avi Kivitya8170e52012-10-23 12:30:10 +02002277void *cpu_physical_memory_map(hwaddr addr,
2278 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002279 int is_write)
2280{
2281 return address_space_map(&address_space_memory, addr, plen, is_write);
2282}
2283
Avi Kivitya8170e52012-10-23 12:30:10 +02002284void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2285 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002286{
2287 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2288}
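/* Illustrative DMA pattern (sketch only; "gpa" and "data" are placeholders):
 * map, access, unmap.  The mapping may cover less than requested, so callers
 * loop over the remainder:
 *
 *     hwaddr done = 0;
 *     while (done < size) {
 *         hwaddr l = size - done;
 *         void *p = cpu_physical_memory_map(gpa + done, &l, 1);
 *         if (!p) {
 *             break;                           (bounce buffer busy; retry via map client)
 *         }
 *         memcpy(p, data + done, l);
 *         cpu_physical_memory_unmap(p, l, 1, l);
 *         done += l;
 *     }
 */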
2289
bellard8df1cd02005-01-28 22:37:22 +00002290/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002291static inline uint32_t ldl_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002292 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002293{
bellard8df1cd02005-01-28 22:37:22 +00002294 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002295 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002296 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002297 hwaddr l = 4;
2298 hwaddr addr1;
bellard8df1cd02005-01-28 22:37:22 +00002299
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002300 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2301 false);
2302 if (l < 4 || !memory_access_is_direct(mr, false)) {
bellard8df1cd02005-01-28 22:37:22 +00002303 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002304 io_mem_read(mr, addr1, &val, 4);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002305#if defined(TARGET_WORDS_BIGENDIAN)
2306 if (endian == DEVICE_LITTLE_ENDIAN) {
2307 val = bswap32(val);
2308 }
2309#else
2310 if (endian == DEVICE_BIG_ENDIAN) {
2311 val = bswap32(val);
2312 }
2313#endif
bellard8df1cd02005-01-28 22:37:22 +00002314 } else {
2315 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002316 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002317 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002318 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002319 switch (endian) {
2320 case DEVICE_LITTLE_ENDIAN:
2321 val = ldl_le_p(ptr);
2322 break;
2323 case DEVICE_BIG_ENDIAN:
2324 val = ldl_be_p(ptr);
2325 break;
2326 default:
2327 val = ldl_p(ptr);
2328 break;
2329 }
bellard8df1cd02005-01-28 22:37:22 +00002330 }
2331 return val;
2332}
2333
Avi Kivitya8170e52012-10-23 12:30:10 +02002334uint32_t ldl_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002335{
2336 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2337}
2338
Avi Kivitya8170e52012-10-23 12:30:10 +02002339uint32_t ldl_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002340{
2341 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2342}
2343
Avi Kivitya8170e52012-10-23 12:30:10 +02002344uint32_t ldl_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002345{
2346 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2347}
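
/*
 * Editorial sketch: ldl_phys() reads a 32-bit value in the target's native
 * byte order, while the _le/_be variants are for guest-visible data whose
 * endianness is fixed by a specification rather than by the target.  As the
 * warning above says, the address must stay 4-byte aligned; the helper does
 * not split the access.  The function below is hypothetical.
 */
static uint32_t example_ldl_aligned_le(hwaddr addr)
{
    assert((addr & 3) == 0);        /* alignment is the caller's responsibility */
    return ldl_le_phys(addr);
}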
2348
bellard84b7b8e2005-11-28 21:19:04 +00002349/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002350static inline uint64_t ldq_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002351 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00002352{
bellard84b7b8e2005-11-28 21:19:04 +00002353 uint8_t *ptr;
2354 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002355 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002356 hwaddr l = 8;
2357 hwaddr addr1;
bellard84b7b8e2005-11-28 21:19:04 +00002358
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002359 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2360 false);
2361 if (l < 8 || !memory_access_is_direct(mr, false)) {
bellard84b7b8e2005-11-28 21:19:04 +00002362 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002363 io_mem_read(mr, addr1, &val, 8);
Paolo Bonzini968a5622013-05-24 17:58:37 +02002364#if defined(TARGET_WORDS_BIGENDIAN)
2365 if (endian == DEVICE_LITTLE_ENDIAN) {
2366 val = bswap64(val);
2367 }
2368#else
2369 if (endian == DEVICE_BIG_ENDIAN) {
2370 val = bswap64(val);
2371 }
2372#endif
bellard84b7b8e2005-11-28 21:19:04 +00002373 } else {
2374 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002375 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002376 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002377 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002378 switch (endian) {
2379 case DEVICE_LITTLE_ENDIAN:
2380 val = ldq_le_p(ptr);
2381 break;
2382 case DEVICE_BIG_ENDIAN:
2383 val = ldq_be_p(ptr);
2384 break;
2385 default:
2386 val = ldq_p(ptr);
2387 break;
2388 }
bellard84b7b8e2005-11-28 21:19:04 +00002389 }
2390 return val;
2391}
2392
Avi Kivitya8170e52012-10-23 12:30:10 +02002393uint64_t ldq_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002394{
2395 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2396}
2397
Avi Kivitya8170e52012-10-23 12:30:10 +02002398uint64_t ldq_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002399{
2400 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2401}
2402
Avi Kivitya8170e52012-10-23 12:30:10 +02002403uint64_t ldq_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002404{
2405 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2406}
2407
bellardaab33092005-10-30 20:48:42 +00002408/* XXX: optimize */
Avi Kivitya8170e52012-10-23 12:30:10 +02002409uint32_t ldub_phys(hwaddr addr)
bellardaab33092005-10-30 20:48:42 +00002410{
2411 uint8_t val;
2412 cpu_physical_memory_read(addr, &val, 1);
2413 return val;
2414}
2415
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002416/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002417static inline uint32_t lduw_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002418 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00002419{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002420 uint8_t *ptr;
2421 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002422 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002423 hwaddr l = 2;
2424 hwaddr addr1;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002425
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002426 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2427 false);
2428 if (l < 2 || !memory_access_is_direct(mr, false)) {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002429 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002430 io_mem_read(mr, addr1, &val, 2);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002431#if defined(TARGET_WORDS_BIGENDIAN)
2432 if (endian == DEVICE_LITTLE_ENDIAN) {
2433 val = bswap16(val);
2434 }
2435#else
2436 if (endian == DEVICE_BIG_ENDIAN) {
2437 val = bswap16(val);
2438 }
2439#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002440 } else {
2441 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002442 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002443 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002444 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002445 switch (endian) {
2446 case DEVICE_LITTLE_ENDIAN:
2447 val = lduw_le_p(ptr);
2448 break;
2449 case DEVICE_BIG_ENDIAN:
2450 val = lduw_be_p(ptr);
2451 break;
2452 default:
2453 val = lduw_p(ptr);
2454 break;
2455 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002456 }
2457 return val;
bellardaab33092005-10-30 20:48:42 +00002458}
2459
Avi Kivitya8170e52012-10-23 12:30:10 +02002460uint32_t lduw_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002461{
2462 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2463}
2464
Avi Kivitya8170e52012-10-23 12:30:10 +02002465uint32_t lduw_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002466{
2467 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2468}
2469
Avi Kivitya8170e52012-10-23 12:30:10 +02002470uint32_t lduw_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002471{
2472 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2473}
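
/*
 * Editorial sketch: with the byte/word/long/quad loads above, a device model
 * can pick a guest-resident structure apart field by field.  The layout used
 * here (8-byte address, 4-byte length, 2-byte flags, all little-endian) is
 * invented purely for illustration.
 */
static void example_parse_desc(hwaddr desc, uint64_t *buf_addr,
                               uint32_t *buf_len, uint16_t *flags)
{
    *buf_addr = ldq_le_phys(desc + 0);
    *buf_len  = ldl_le_phys(desc + 8);
    *flags    = lduw_le_phys(desc + 12);
}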
2474
bellard8df1cd02005-01-28 22:37:22 +00002475/* warning: addr must be aligned. The ram page is not marked as dirty
2476 and the code inside is not invalidated. It is useful if the dirty
2477 bits are used to track modified PTEs */
Avi Kivitya8170e52012-10-23 12:30:10 +02002478void stl_phys_notdirty(hwaddr addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00002479{
bellard8df1cd02005-01-28 22:37:22 +00002480 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002481 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002482 hwaddr l = 4;
2483 hwaddr addr1;
bellard8df1cd02005-01-28 22:37:22 +00002484
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002485 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2486 true);
2487 if (l < 4 || !memory_access_is_direct(mr, true)) {
2488 io_mem_write(mr, addr1, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00002489 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002490 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00002491 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00002492 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00002493
2494 if (unlikely(in_migration)) {
2495 if (!cpu_physical_memory_is_dirty(addr1)) {
2496 /* invalidate code */
2497 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2498 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002499 cpu_physical_memory_set_dirty_flags(
2500 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori74576192008-10-06 14:02:03 +00002501 }
2502 }
bellard8df1cd02005-01-28 22:37:22 +00002503 }
2504}
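
/*
 * Editorial sketch: stl_phys_notdirty() is the store used by target MMU code
 * when it rewrites a guest page-table entry (for example to set an accessed
 * bit).  Going through this variant means QEMU's own bookkeeping write is not
 * itself recorded when the dirty bits are being used to track guest PTE
 * modifications, and no translated code is invalidated.  The PTE layout below
 * (bit 5 = accessed) is hypothetical.
 */
static void example_mark_pte_accessed(hwaddr pte_addr, uint32_t pte)
{
    stl_phys_notdirty(pte_addr, pte | (1u << 5));
}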
2505
2506/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002507static inline void stl_phys_internal(hwaddr addr, uint32_t val,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002508 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002509{
bellard8df1cd02005-01-28 22:37:22 +00002510 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002511 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002512 hwaddr l = 4;
2513 hwaddr addr1;
bellard8df1cd02005-01-28 22:37:22 +00002514
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002515 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2516 true);
2517 if (l < 4 || !memory_access_is_direct(mr, true)) {
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002518#if defined(TARGET_WORDS_BIGENDIAN)
2519 if (endian == DEVICE_LITTLE_ENDIAN) {
2520 val = bswap32(val);
2521 }
2522#else
2523 if (endian == DEVICE_BIG_ENDIAN) {
2524 val = bswap32(val);
2525 }
2526#endif
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002527 io_mem_write(mr, addr1, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00002528 } else {
bellard8df1cd02005-01-28 22:37:22 +00002529 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002530 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00002531 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002532 switch (endian) {
2533 case DEVICE_LITTLE_ENDIAN:
2534 stl_le_p(ptr, val);
2535 break;
2536 case DEVICE_BIG_ENDIAN:
2537 stl_be_p(ptr, val);
2538 break;
2539 default:
2540 stl_p(ptr, val);
2541 break;
2542 }
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002543 invalidate_and_set_dirty(addr1, 4);
bellard8df1cd02005-01-28 22:37:22 +00002544 }
2545}
2546
Avi Kivitya8170e52012-10-23 12:30:10 +02002547void stl_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002548{
2549 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2550}
2551
Avi Kivitya8170e52012-10-23 12:30:10 +02002552void stl_le_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002553{
2554 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2555}
2556
Avi Kivitya8170e52012-10-23 12:30:10 +02002557void stl_be_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002558{
2559 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2560}
2561
bellardaab33092005-10-30 20:48:42 +00002562/* XXX: optimize */
Avi Kivitya8170e52012-10-23 12:30:10 +02002563void stb_phys(hwaddr addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00002564{
2565 uint8_t v = val;
2566 cpu_physical_memory_write(addr, &v, 1);
2567}
2568
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002569/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002570static inline void stw_phys_internal(hwaddr addr, uint32_t val,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002571 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00002572{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002573 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002574 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002575 hwaddr l = 2;
2576 hwaddr addr1;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002577
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002578 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2579 true);
2580 if (l < 2 || !memory_access_is_direct(mr, true)) {
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002581#if defined(TARGET_WORDS_BIGENDIAN)
2582 if (endian == DEVICE_LITTLE_ENDIAN) {
2583 val = bswap16(val);
2584 }
2585#else
2586 if (endian == DEVICE_BIG_ENDIAN) {
2587 val = bswap16(val);
2588 }
2589#endif
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002590 io_mem_write(mr, addr1, val, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002591 } else {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002592 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002593 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002594 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002595 switch (endian) {
2596 case DEVICE_LITTLE_ENDIAN:
2597 stw_le_p(ptr, val);
2598 break;
2599 case DEVICE_BIG_ENDIAN:
2600 stw_be_p(ptr, val);
2601 break;
2602 default:
2603 stw_p(ptr, val);
2604 break;
2605 }
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002606 invalidate_and_set_dirty(addr1, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002607 }
bellardaab33092005-10-30 20:48:42 +00002608}
2609
Avi Kivitya8170e52012-10-23 12:30:10 +02002610void stw_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002611{
2612 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2613}
2614
Avi Kivitya8170e52012-10-23 12:30:10 +02002615void stw_le_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002616{
2617 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2618}
2619
Avi Kivitya8170e52012-10-23 12:30:10 +02002620void stw_be_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002621{
2622 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2623}
2624
bellardaab33092005-10-30 20:48:42 +00002625/* XXX: optimize */
Avi Kivitya8170e52012-10-23 12:30:10 +02002626void stq_phys(hwaddr addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00002627{
2628 val = tswap64(val);
Stefan Weil71d2b722011-03-26 21:06:56 +01002629 cpu_physical_memory_write(addr, &val, 8);
bellardaab33092005-10-30 20:48:42 +00002630}
2631
Avi Kivitya8170e52012-10-23 12:30:10 +02002632void stq_le_phys(hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002633{
2634 val = cpu_to_le64(val);
2635 cpu_physical_memory_write(addr, &val, 8);
2636}
2637
Avi Kivitya8170e52012-10-23 12:30:10 +02002638void stq_be_phys(hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002639{
2640 val = cpu_to_be64(val);
2641 cpu_physical_memory_write(addr, &val, 8);
2642}
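
/*
 * Editorial sketch: the st*_phys family mirrors the loads above.  A device
 * model completing a request into a little-endian, guest-visible record might
 * fill it in like this; the field offsets and names are invented for the
 * example.
 */
static void example_write_completion(hwaddr rec, uint64_t buf_addr,
                                     uint32_t xfer_len, uint16_t status)
{
    stq_le_phys(rec + 0,  buf_addr);    /* 64-bit buffer address   */
    stl_le_phys(rec + 8,  xfer_len);    /* 32-bit transferred size */
    stw_le_phys(rec + 12, status);      /* 16-bit completion code  */
}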
2643
aliguori5e2972f2009-03-28 17:51:36 +00002644/* virtual memory access for debug (includes writing to ROM) */
Andreas Färberf17ec442013-06-29 19:40:58 +02002645int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00002646 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002647{
2648 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02002649 hwaddr phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00002650 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00002651
2652 while (len > 0) {
2653 page = addr & TARGET_PAGE_MASK;
Andreas Färberf17ec442013-06-29 19:40:58 +02002654 phys_addr = cpu_get_phys_page_debug(cpu, page);
bellard13eb76e2004-01-24 15:23:36 +00002655 /* if no physical page mapped, return an error */
2656 if (phys_addr == -1)
2657 return -1;
2658 l = (page + TARGET_PAGE_SIZE) - addr;
2659 if (l > len)
2660 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00002661 phys_addr += (addr & ~TARGET_PAGE_MASK);
aliguori5e2972f2009-03-28 17:51:36 +00002662 if (is_write)
2663 cpu_physical_memory_write_rom(phys_addr, buf, l);
2664 else
aliguori5e2972f2009-03-28 17:51:36 +00002665 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
bellard13eb76e2004-01-24 15:23:36 +00002666 len -= l;
2667 buf += l;
2668 addr += l;
2669 }
2670 return 0;
2671}
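
/*
 * Editorial sketch: this is the path the gdb stub and the monitor use to
 * access guest memory by virtual address, page by page, with writes routed
 * through cpu_physical_memory_write_rom() so that even ROM can be patched
 * for debugging.  A hypothetical debugger-side read wrapper:
 */
static int example_debug_read(CPUState *cpu, target_ulong vaddr,
                              uint8_t *buf, int len)
{
    /* returns 0 on success, -1 if any page in the range is unmapped */
    return cpu_memory_rw_debug(cpu, vaddr, buf, len, 0 /* is_write */);
}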
Paul Brooka68fe892010-03-01 00:08:59 +00002672#endif
bellard13eb76e2004-01-24 15:23:36 +00002673
Blue Swirl8e4a4242013-01-06 18:30:17 +00002674#if !defined(CONFIG_USER_ONLY)
2675
2676/*
2677 * A helper function for the _utterly broken_ virtio device model to find out if
2678 * it's running on a big endian machine. Don't do this at home kids!
2679 */
2680bool virtio_is_big_endian(void);
2681bool virtio_is_big_endian(void)
2682{
2683#if defined(TARGET_WORDS_BIGENDIAN)
2684 return true;
2685#else
2686 return false;
2687#endif
2688}
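
/*
 * Editorial sketch: as the comment above admits, this only reports the
 * compile-time endianness of the target.  A device model could use it to pick
 * the matching accessor when it touches guest memory directly; the helper
 * below is hypothetical.
 */
static inline uint16_t example_lduw_guest_order(hwaddr addr)
{
    return virtio_is_big_endian() ? lduw_be_phys(addr) : lduw_le_phys(addr);
}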
2689
2690#endif
2691
Wen Congyang76f35532012-05-07 12:04:18 +08002692#ifndef CONFIG_USER_ONLY
Avi Kivitya8170e52012-10-23 12:30:10 +02002693bool cpu_physical_memory_is_io(hwaddr phys_addr)
Wen Congyang76f35532012-05-07 12:04:18 +08002694{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002695 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002696 hwaddr l = 1;
Wen Congyang76f35532012-05-07 12:04:18 +08002697
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002698 mr = address_space_translate(&address_space_memory,
2699 phys_addr, &phys_addr, &l, false);
Wen Congyang76f35532012-05-07 12:04:18 +08002700
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002701 return !(memory_region_is_ram(mr) ||
2702 memory_region_is_romd(mr));
Wen Congyang76f35532012-05-07 12:04:18 +08002703}
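
/*
 * Editorial sketch: this predicate reports whether a guest-physical address
 * is backed by something other than RAM or ROM-device memory, i.e. reading it
 * could trigger MMIO side effects.  A caller that must only touch plain
 * memory (dumping guest RAM, for instance) might use it like this; the
 * wrapper is hypothetical.
 */
static bool example_safe_to_dump(hwaddr paddr)
{
    return !cpu_physical_memory_is_io(paddr);
}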
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04002704
2705void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2706{
2707 RAMBlock *block;
2708
2709 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
2710 func(block->host, block->offset, block->length, opaque);
2711 }
2712}
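
/*
 * Editorial sketch: the iterator above calls the callback once per RAM block
 * with (host pointer, block offset, block length, opaque).  Assuming
 * RAMBlockIterFunc matches that argument order, a hypothetical callback that
 * totals guest RAM could look like this, used as
 * qemu_ram_foreach_block(example_count_ram, &total);
 */
static void example_count_ram(void *host_addr, ram_addr_t offset,
                              ram_addr_t length, void *opaque)
{
    uint64_t *total = opaque;

    (void)host_addr;
    (void)offset;
    *total += length;
}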
Peter Maydellec3f8c92013-06-27 20:53:38 +01002713#endif