/*
 *  Virtual page mapping
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "qemu/cache-utils.h"

#include "qemu/range.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
static bool in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables. */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

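/* Each radix-tree node is an array of P_L2_SIZE PhysPageEntry slots; a chain
 * of P_L2_LEVELS such nodes is enough to cover an ADDR_SPACE_BITS-wide
 * physical address space at TARGET_PAGE_BITS granularity.
 */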
typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map)
{
    unsigned i;
    uint32_t ret;

    ret = map->nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);
    for (i = 0; i < P_L2_SIZE; ++i) {
        map->nodes[ret][i].skip = 1;
        map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

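/* Recursively descend the radix tree, allocating intermediate nodes as
 * needed, and point every entry covering [*index, *index + *nb) at the
 * given leaf section.
 */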
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map);
        p = map->nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < P_L2_SIZE; i++) {
                p[i].skip = 0;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = map->nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

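/* Compact the whole tree, starting from the root entry (phys_page_compact
 * recurses into every non-leaf child).
 */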
static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

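/* Look up the MemoryRegionSection covering 'addr', following the 'skip'
 * links down the radix tree; addresses with no mapping resolve to the
 * unassigned section.
 */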
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

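/* Find the section for 'addr' and, if requested, resolve subpage containers
 * down to the section that actually covers the address.
 */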
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

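/* Translate 'addr' into an offset within its MemoryRegion and clamp '*plen'
 * so that the returned range does not run past the end of the region.
 */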
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}

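/* RAM accesses (other than writes to read-only RAM) and ROM-device reads can
 * use the host pointer directly; everything else must go through MMIO
 * dispatch.
 */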
static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

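/* Translate a physical address within an address space, iterating through
 * IOMMU regions until a terminal MemoryRegion is reached.  For direct RAM
 * accesses the returned length is clamped to the containing page.
 */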
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        len = MIN(page, len);
    }

    *plen = len;
    *xlat = addr;
    return mr;
}

MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(&address_space_memory,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)

{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
    }

    breakpoint_invalidate(ENV_GET_CPU(env), pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(ENV_GET_CPU(env), breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
#endif
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    CPUState *cpu = ENV_GET_CPU(env);
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
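/* Map a ram_addr_t back to its RAMBlock, checking the most-recently-used
 * block before scanning the whole list.
 */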
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here. */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)block->host + (start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
                                     unsigned client)
{
    if (length == 0)
        return;
    cpu_physical_memory_clear_dirty_range(start, length, client);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }
}

static void cpu_physical_memory_set_dirty_tracking(bool enable)
{
    in_migration = enable;
}

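/* Build the iotlb entry for a section: RAM sections encode the ram_addr ORed
 * with the notdirty/ROM section index, MMIO sections encode the section
 * index plus offset, and pages with watchpoints are redirected to the
 * watchpoint section.
 */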
hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - section->address_space->dispatch->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;

828/*
829 * Set a custom physical guest memory alloator.
830 * Accelerators with unusual needs may need this. Hopefully, we can
831 * get rid of it eventually.
832 */
void phys_mem_set_alloc(void *(*alloc)(size_t))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

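/* Register a section that does not cover a full target page: allocate (or
 * reuse) the subpage container for its page and register the section inside
 * it.
 */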
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}


static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

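/* MemoryListener callback: split an incoming section into an unaligned head,
 * whole pages in the middle, and an unaligned tail, registering each part as
 * a subpage or multipage mapping as appropriate.
 */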
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static sigjmp_buf sigjump;

static void sigbus_handler(int signal)
{
    siglongjmp(sigjump, 1);
}

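/* Back a RAMBlock with a file created in a hugetlbfs mount (-mem-path),
 * optionally touching every page up front when -mem-prealloc is in use.
 */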
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        return NULL;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }

    if (mem_prealloc) {
        int ret, i;
        struct sigaction act, oldact;
        sigset_t set, oldset;

        memset(&act, 0, sizeof(act));
        act.sa_handler = &sigbus_handler;
        act.sa_flags = 0;

        ret = sigaction(SIGBUS, &act, &oldact);
        if (ret) {
            perror("file_ram_alloc: failed to install signal handler");
            exit(1);
        }

        /* unblock SIGBUS */
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        pthread_sigmask(SIG_UNBLOCK, &set, &oldset);

        if (sigsetjmp(sigjump, 1)) {
            fprintf(stderr, "file_ram_alloc: failed to preallocate pages\n");
            exit(1);
        }

        /* MAP_POPULATE silently ignores failures */
        for (i = 0; i < (memory/hpagesize); i++) {
            memset(area + (hpagesize*i), 0, 1);
        }

        ret = sigaction(SIGBUS, &oldact, NULL);
        if (ret) {
            perror("file_ram_alloc: failed to reinstall signal handler");
            exit(1);
        }

        pthread_sigmask(SIG_SETMASK, &oldset, NULL);
    }

    block->fd = fd;
    return area;
}
#else
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    fprintf(stderr, "-mem-path not supported on this host\n");
    exit(1);
}
#endif

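/* Find the best-fitting gap in the ram_addr_t space that can hold 'size'
 * bytes, scanning the existing RAM blocks.
 */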
Alex Williamsond17b5282010-06-25 11:08:38 -06001118static ram_addr_t find_ram_offset(ram_addr_t size)
1119{
Alex Williamson04b16652010-07-02 11:13:17 -06001120 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001121 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001122
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001123 assert(size != 0); /* it would hand out same offset multiple times */
1124
Paolo Bonzinia3161032012-11-14 15:54:48 +01001125 if (QTAILQ_EMPTY(&ram_list.blocks))
Alex Williamson04b16652010-07-02 11:13:17 -06001126 return 0;
1127
Paolo Bonzinia3161032012-11-14 15:54:48 +01001128 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001129 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001130
1131 end = block->offset + block->length;
1132
Paolo Bonzinia3161032012-11-14 15:54:48 +01001133 QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001134 if (next_block->offset >= end) {
1135 next = MIN(next, next_block->offset);
1136 }
1137 }
1138 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001139 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001140 mingap = next - end;
1141 }
1142 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001143
1144 if (offset == RAM_ADDR_MAX) {
1145 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1146 (uint64_t)size);
1147 abort();
1148 }
1149
Alex Williamson04b16652010-07-02 11:13:17 -06001150 return offset;
1151}
1152
Juan Quintela652d7ec2012-07-20 10:37:54 +02001153ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001154{
Alex Williamsond17b5282010-06-25 11:08:38 -06001155 RAMBlock *block;
1156 ram_addr_t last = 0;
1157
Paolo Bonzinia3161032012-11-14 15:54:48 +01001158 QTAILQ_FOREACH(block, &ram_list.blocks, next)
Alex Williamsond17b5282010-06-25 11:08:38 -06001159 last = MAX(last, block->offset + block->length);
1160
1161 return last;
1162}
1163
Jason Baronddb97f12012-08-02 15:44:16 -04001164static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1165{
1166 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001167
1168 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
Markus Armbruster2ff3de62013-07-04 15:09:22 +02001169 if (!qemu_opt_get_bool(qemu_get_machine_opts(),
1170 "dump-guest-core", true)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001171 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1172 if (ret) {
1173 perror("qemu_madvise");
1174 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1175 "but dump_guest_core=off specified\n");
1176 }
1177 }
1178}
1179
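/* Give the block at 'addr' its identifier: the owning device's qdev path
 * (if any) followed by 'name'. The idstr must be unique across blocks,
 * e.g. so RAM migration can match them up; a duplicate aborts. */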
Avi Kivityc5705a72011-12-20 15:59:12 +02001180void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
Cam Macdonell84b89d72010-07-26 18:10:57 -06001181{
1182 RAMBlock *new_block, *block;
1183
Avi Kivityc5705a72011-12-20 15:59:12 +02001184 new_block = NULL;
Paolo Bonzinia3161032012-11-14 15:54:48 +01001185 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001186 if (block->offset == addr) {
1187 new_block = block;
1188 break;
1189 }
1190 }
1191 assert(new_block);
1192 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001193
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001194 if (dev) {
1195 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001196 if (id) {
1197 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001198 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001199 }
1200 }
1201 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1202
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001203 /* This assumes the iothread lock is taken here too. */
1204 qemu_mutex_lock_ramlist();
Paolo Bonzinia3161032012-11-14 15:54:48 +01001205 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001206 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001207 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1208 new_block->idstr);
1209 abort();
1210 }
1211 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001212 qemu_mutex_unlock_ramlist();
Avi Kivityc5705a72011-12-20 15:59:12 +02001213}
1214
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001215static int memory_try_enable_merging(void *addr, size_t len)
1216{
Markus Armbruster2ff3de62013-07-04 15:09:22 +02001217 if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001218 /* disabled by the user */
1219 return 0;
1220 }
1221
1222 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1223}
1224
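/* Allocate guest RAM and register it in ram_list. The backing memory is,
 * in order of preference: the caller-provided 'host' pointer, Xen's
 * allocator, an mmap of -mem-path, or phys_mem_alloc (normally anonymous
 * memory). Grows the dirty bitmaps if needed and returns the new block's
 * offset. */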
Avi Kivityc5705a72011-12-20 15:59:12 +02001225ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1226 MemoryRegion *mr)
1227{
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001228 RAMBlock *block, *new_block;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001229 ram_addr_t old_ram_size, new_ram_size;
1230
1231 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001232
1233 size = TARGET_PAGE_ALIGN(size);
1234 new_block = g_malloc0(sizeof(*new_block));
Markus Armbruster3435f392013-07-31 15:11:07 +02001235 new_block->fd = -1;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001236
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001237 /* This assumes the iothread lock is taken here too. */
1238 qemu_mutex_lock_ramlist();
Avi Kivity7c637362011-12-21 13:09:49 +02001239 new_block->mr = mr;
Jun Nakajima432d2682010-08-31 16:41:25 +01001240 new_block->offset = find_ram_offset(size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001241 if (host) {
1242 new_block->host = host;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001243 new_block->flags |= RAM_PREALLOC_MASK;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001244 } else if (xen_enabled()) {
1245 if (mem_path) {
1246 fprintf(stderr, "-mem-path not supported with Xen\n");
1247 exit(1);
1248 }
1249 xen_ram_alloc(new_block->offset, size, mr);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001250 } else {
1251 if (mem_path) {
Markus Armbrustere1e84ba2013-07-31 15:11:10 +02001252 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1253 /*
1254 * file_ram_alloc() needs to allocate just like
1255 * phys_mem_alloc, but we haven't bothered to provide
1256 * a hook there.
1257 */
1258 fprintf(stderr,
1259 "-mem-path not supported with this accelerator\n");
1260 exit(1);
1261 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001262 new_block->host = file_ram_alloc(new_block, size, mem_path);
Markus Armbruster0628c182013-07-31 15:11:06 +02001263 }
1264 if (!new_block->host) {
Markus Armbruster91138032013-07-31 15:11:08 +02001265 new_block->host = phys_mem_alloc(size);
Markus Armbruster39228252013-07-31 15:11:11 +02001266 if (!new_block->host) {
1267 fprintf(stderr, "Cannot set up guest memory '%s': %s\n",
1268 new_block->mr->name, strerror(errno));
1269 exit(1);
1270 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001271 memory_try_enable_merging(new_block->host, size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001272 }
1273 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001274 new_block->length = size;
1275
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001276 /* Keep the list sorted from biggest to smallest block. */
1277 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1278 if (block->length < new_block->length) {
1279 break;
1280 }
1281 }
1282 if (block) {
1283 QTAILQ_INSERT_BEFORE(block, new_block, next);
1284 } else {
1285 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1286 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001287 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001288
Umesh Deshpandef798b072011-08-18 11:41:17 -07001289 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001290 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001291
Juan Quintela2152f5c2013-10-08 13:52:02 +02001292 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1293
1294 if (new_ram_size > old_ram_size) {
Juan Quintela1ab4c8c2013-10-08 16:14:39 +02001295 int i;
1296 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1297 ram_list.dirty_memory[i] =
1298 bitmap_zero_extend(ram_list.dirty_memory[i],
1299 old_ram_size, new_ram_size);
1300 }
Juan Quintela2152f5c2013-10-08 13:52:02 +02001301 }
Juan Quintela75218e72013-10-08 12:31:54 +02001302 cpu_physical_memory_set_dirty_range(new_block->offset, size);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001303
Jason Baronddb97f12012-08-02 15:44:16 -04001304 qemu_ram_setup_dump(new_block->host, size);
Luiz Capitulinoad0b5322012-10-05 16:47:57 -03001305 qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
Andrea Arcangeli3e469db2013-07-25 12:11:15 +02001306 qemu_madvise(new_block->host, size, QEMU_MADV_DONTFORK);
Jason Baronddb97f12012-08-02 15:44:16 -04001307
Cam Macdonell84b89d72010-07-26 18:10:57 -06001308 if (kvm_enabled())
1309 kvm_setup_guest_memory(new_block->host, size);
1310
1311 return new_block->offset;
1312}
1313
Avi Kivityc5705a72011-12-20 15:59:12 +02001314ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
pbrook94a6b542009-04-11 17:15:54 +00001315{
Avi Kivityc5705a72011-12-20 15:59:12 +02001316 return qemu_ram_alloc_from_ptr(size, NULL, mr);
pbrook94a6b542009-04-11 17:15:54 +00001317}
bellarde9a1ab12007-02-08 23:08:38 +00001318
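/* Drop the RAMBlock bookkeeping for memory registered with
 * qemu_ram_alloc_from_ptr(); the host memory itself stays owned by the
 * caller and is not unmapped here. */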
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001319void qemu_ram_free_from_ptr(ram_addr_t addr)
1320{
1321 RAMBlock *block;
1322
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001323 /* This assumes the iothread lock is taken here too. */
1324 qemu_mutex_lock_ramlist();
Paolo Bonzinia3161032012-11-14 15:54:48 +01001325 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001326 if (addr == block->offset) {
Paolo Bonzinia3161032012-11-14 15:54:48 +01001327 QTAILQ_REMOVE(&ram_list.blocks, block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001328 ram_list.mru_block = NULL;
Umesh Deshpandef798b072011-08-18 11:41:17 -07001329 ram_list.version++;
Anthony Liguori7267c092011-08-20 22:09:37 -05001330 g_free(block);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001331 break;
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001332 }
1333 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001334 qemu_mutex_unlock_ramlist();
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001335}
1336
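/* Free a block allocated with qemu_ram_alloc(): unlink it from ram_list and
 * release the host memory the same way it was obtained (preallocated host
 * pointer, Xen map cache, file-backed mmap, or anonymous allocation). */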
Anthony Liguoric227f092009-10-01 16:12:16 -05001337void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00001338{
Alex Williamson04b16652010-07-02 11:13:17 -06001339 RAMBlock *block;
1340
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001341 /* This assumes the iothread lock is taken here too. */
1342 qemu_mutex_lock_ramlist();
Paolo Bonzinia3161032012-11-14 15:54:48 +01001343 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001344 if (addr == block->offset) {
Paolo Bonzinia3161032012-11-14 15:54:48 +01001345 QTAILQ_REMOVE(&ram_list.blocks, block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001346 ram_list.mru_block = NULL;
Umesh Deshpandef798b072011-08-18 11:41:17 -07001347 ram_list.version++;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001348 if (block->flags & RAM_PREALLOC_MASK) {
1349 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001350 } else if (xen_enabled()) {
1351 xen_invalidate_map_cache_entry(block->host);
Stefan Weil089f3f72013-09-18 07:48:15 +02001352#ifndef _WIN32
Markus Armbruster3435f392013-07-31 15:11:07 +02001353 } else if (block->fd >= 0) {
1354 munmap(block->host, block->length);
1355 close(block->fd);
Stefan Weil089f3f72013-09-18 07:48:15 +02001356#endif
Alex Williamson04b16652010-07-02 11:13:17 -06001357 } else {
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001358 qemu_anon_ram_free(block->host, block->length);
Alex Williamson04b16652010-07-02 11:13:17 -06001359 }
Anthony Liguori7267c092011-08-20 22:09:37 -05001360 g_free(block);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001361 break;
Alex Williamson04b16652010-07-02 11:13:17 -06001362 }
1363 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001364 qemu_mutex_unlock_ramlist();
Alex Williamson04b16652010-07-02 11:13:17 -06001365
bellarde9a1ab12007-02-08 23:08:38 +00001366}
1367
Huang Yingcd19cfa2011-03-02 08:56:19 +01001368#ifndef _WIN32
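/* Re-create the host mapping for [addr, addr + length), reproducing the
 * original allocation (file-backed or anonymous mmap at the same address).
 * Presumably used to replace pages that have become unusable, e.g. after a
 * hardware memory error; merging and dump settings are reapplied as on
 * first allocation. */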
1369void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1370{
1371 RAMBlock *block;
1372 ram_addr_t offset;
1373 int flags;
1374 void *area, *vaddr;
1375
Paolo Bonzinia3161032012-11-14 15:54:48 +01001376 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001377 offset = addr - block->offset;
1378 if (offset < block->length) {
1379 vaddr = block->host + offset;
1380 if (block->flags & RAM_PREALLOC_MASK) {
1381 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001382 } else if (xen_enabled()) {
1383 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001384 } else {
1385 flags = MAP_FIXED;
1386 munmap(vaddr, length);
Markus Armbruster3435f392013-07-31 15:11:07 +02001387 if (block->fd >= 0) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001388#ifdef MAP_POPULATE
Markus Armbruster3435f392013-07-31 15:11:07 +02001389 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1390 MAP_PRIVATE;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001391#else
Markus Armbruster3435f392013-07-31 15:11:07 +02001392 flags |= MAP_PRIVATE;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001393#endif
Markus Armbruster3435f392013-07-31 15:11:07 +02001394 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1395 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001396 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001397 /*
1398 * Remap needs to match alloc. Accelerators that
1399 * set phys_mem_alloc never remap. If they did,
1400 * we'd need a remap hook here.
1401 */
1402 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1403
Huang Yingcd19cfa2011-03-02 08:56:19 +01001404 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1405 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1406 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001407 }
1408 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001409 fprintf(stderr, "Could not remap addr: "
1410 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001411 length, addr);
1412 exit(1);
1413 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001414 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001415 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001416 }
1417 return;
1418 }
1419 }
1420}
1421#endif /* !_WIN32 */
1422
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001423/* Return a host pointer to ram allocated with qemu_ram_alloc.
1424 With the exception of the softmmu code in this file, this should
1425 only be used for local memory (e.g. video ram) that the device owns,
1426 and knows it isn't going to access beyond the end of the block.
1427
1428 It should not be used for general purpose DMA.
1429 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1430 */
1431void *qemu_get_ram_ptr(ram_addr_t addr)
1432{
1433 RAMBlock *block = qemu_get_ram_block(addr);
1434
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001435 if (xen_enabled()) {
1436 /* We need to check if the requested address is in the RAM
1437 * because we don't want to map the entire memory in QEMU.
1438 * In that case just map until the end of the page.
1439 */
1440 if (block->offset == 0) {
1441 return xen_map_cache(addr, 0, 0);
1442 } else if (block->host == NULL) {
1443 block->host =
1444 xen_map_cache(block->offset, block->length, 1);
1445 }
1446 }
1447 return block->host + (addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00001448}
1449
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001450/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1451 * but takes a size argument */
Peter Maydellcb85f7a2013-07-08 09:44:04 +01001452static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001453{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001454 if (*size == 0) {
1455 return NULL;
1456 }
Jan Kiszka868bb332011-06-21 22:59:09 +02001457 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001458 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02001459 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001460 RAMBlock *block;
1461
Paolo Bonzinia3161032012-11-14 15:54:48 +01001462 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001463 if (addr - block->offset < block->length) {
1464 if (addr - block->offset + *size > block->length)
1465 *size = block->length - addr + block->offset;
1466 return block->host + (addr - block->offset);
1467 }
1468 }
1469
1470 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1471 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001472 }
1473}
1474
Paolo Bonzini7443b432013-06-03 12:44:02 +02001475/* Some of the softmmu routines need to translate from a host pointer
1476 (typically a TLB entry) back to a ram offset. */
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001477MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00001478{
pbrook94a6b542009-04-11 17:15:54 +00001479 RAMBlock *block;
1480 uint8_t *host = ptr;
1481
Jan Kiszka868bb332011-06-21 22:59:09 +02001482 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001483 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001484 return qemu_get_ram_block(*ram_addr)->mr;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001485 }
1486
Paolo Bonzini23887b72013-05-06 14:28:39 +02001487 block = ram_list.mru_block;
1488 if (block && block->host && host - block->host < block->length) {
1489 goto found;
1490 }
1491
Paolo Bonzinia3161032012-11-14 15:54:48 +01001492 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001493 /* This case happens when the block is not mapped. */
1494 if (block->host == NULL) {
1495 continue;
1496 }
Alex Williamsonf471a172010-06-11 11:11:42 -06001497 if (host - block->host < block->length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001498 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001499 }
pbrook94a6b542009-04-11 17:15:54 +00001500 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001501
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001502 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001503
1504found:
1505 *ram_addr = block->offset + (host - block->host);
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001506 return block->mr;
Marcelo Tosattie8902612010-10-11 15:31:19 -03001507}
Alex Williamsonf471a172010-06-11 11:11:42 -06001508
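/* Slow-path write handler for pages whose dirty bits are still clear:
 * invalidate any translated code derived from the page, perform the store
 * by size, then mark the page dirty for migration and VGA tracking. */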
Avi Kivitya8170e52012-10-23 12:30:10 +02001509static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001510 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001511{
Juan Quintela52159192013-10-08 12:44:04 +02001512 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001513 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00001514 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001515 switch (size) {
1516 case 1:
1517 stb_p(qemu_get_ram_ptr(ram_addr), val);
1518 break;
1519 case 2:
1520 stw_p(qemu_get_ram_ptr(ram_addr), val);
1521 break;
1522 case 4:
1523 stl_p(qemu_get_ram_ptr(ram_addr), val);
1524 break;
1525 default:
1526 abort();
1527 }
Juan Quintela52159192013-10-08 12:44:04 +02001528 cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_MIGRATION);
1529 cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_VGA);
bellardf23db162005-08-21 19:12:28 +00001530 /* we remove the notdirty callback only if the code has been
1531 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02001532 if (!cpu_physical_memory_is_clean(ram_addr)) {
Andreas Färber4917cf42013-05-27 05:17:50 +02001533 CPUArchState *env = current_cpu->env_ptr;
1534 tlb_set_dirty(env, env->mem_io_vaddr);
1535 }
bellard1ccde1c2004-02-06 19:46:14 +00001536}
1537
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001538static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1539 unsigned size, bool is_write)
1540{
1541 return is_write;
1542}
1543
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001544static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001545 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001546 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001547 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00001548};
1549
pbrook0f459d12008-06-09 00:20:13 +00001550/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00001551static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00001552{
Andreas Färber4917cf42013-05-27 05:17:50 +02001553 CPUArchState *env = current_cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00001554 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00001555 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00001556 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00001557 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00001558
aliguori06d55cc2008-11-18 20:24:06 +00001559 if (env->watchpoint_hit) {
1560 /* We re-entered the check after replacing the TB. Now raise
1561 * the debug interrupt so that is will trigger after the
1562 * current instruction. */
Andreas Färberc3affe52013-01-18 15:03:43 +01001563 cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00001564 return;
1565 }
pbrook2e70f6e2008-06-29 01:03:05 +00001566 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00001567 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001568 if ((vaddr == (wp->vaddr & len_mask) ||
1569 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00001570 wp->flags |= BP_WATCHPOINT_HIT;
1571 if (!env->watchpoint_hit) {
1572 env->watchpoint_hit = wp;
Blue Swirl5a316522012-12-02 21:28:09 +00001573 tb_check_watchpoint(env);
aliguori6e140f22008-11-18 20:37:55 +00001574 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1575 env->exception_index = EXCP_DEBUG;
Max Filippov488d6572012-01-29 02:24:39 +04001576 cpu_loop_exit(env);
aliguori6e140f22008-11-18 20:37:55 +00001577 } else {
1578 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1579 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
Max Filippov488d6572012-01-29 02:24:39 +04001580 cpu_resume_from_signal(env, NULL);
aliguori6e140f22008-11-18 20:37:55 +00001581 }
aliguori06d55cc2008-11-18 20:24:06 +00001582 }
aliguori6e140f22008-11-18 20:37:55 +00001583 } else {
1584 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00001585 }
1586 }
1587}
1588
pbrook6658ffb2007-03-16 23:58:11 +00001589/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1590 so these check for a hit then pass through to the normal out-of-line
1591 phys routines. */
Avi Kivitya8170e52012-10-23 12:30:10 +02001592static uint64_t watch_mem_read(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001593 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001594{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001595 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1596 switch (size) {
1597 case 1: return ldub_phys(addr);
1598 case 2: return lduw_phys(addr);
1599 case 4: return ldl_phys(addr);
1600 default: abort();
1601 }
pbrook6658ffb2007-03-16 23:58:11 +00001602}
1603
Avi Kivitya8170e52012-10-23 12:30:10 +02001604static void watch_mem_write(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001605 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001606{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001607 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1608 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04001609 case 1:
1610 stb_phys(addr, val);
1611 break;
1612 case 2:
1613 stw_phys(addr, val);
1614 break;
1615 case 4:
1616 stl_phys(addr, val);
1617 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02001618 default: abort();
1619 }
pbrook6658ffb2007-03-16 23:58:11 +00001620}
1621
Avi Kivity1ec9b902012-01-02 12:47:48 +02001622static const MemoryRegionOps watch_mem_ops = {
1623 .read = watch_mem_read,
1624 .write = watch_mem_write,
1625 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00001626};
pbrook6658ffb2007-03-16 23:58:11 +00001627
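/* Subpage handling: when a page is covered by more than one memory region,
 * accesses land here and are forwarded through the owning address space at
 * subpage->base + addr, so each fragment still reaches the right region. */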
Avi Kivitya8170e52012-10-23 12:30:10 +02001628static uint64_t subpage_read(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001629 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001630{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001631 subpage_t *subpage = opaque;
1632 uint8_t buf[4];
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001633
blueswir1db7b5422007-05-26 17:36:03 +00001634#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001635 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001636 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00001637#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001638 address_space_read(subpage->as, addr + subpage->base, buf, len);
1639 switch (len) {
1640 case 1:
1641 return ldub_p(buf);
1642 case 2:
1643 return lduw_p(buf);
1644 case 4:
1645 return ldl_p(buf);
1646 default:
1647 abort();
1648 }
blueswir1db7b5422007-05-26 17:36:03 +00001649}
1650
Avi Kivitya8170e52012-10-23 12:30:10 +02001651static void subpage_write(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001652 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001653{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001654 subpage_t *subpage = opaque;
1655 uint8_t buf[4];
1656
blueswir1db7b5422007-05-26 17:36:03 +00001657#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001658 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001659 " value %"PRIx64"\n",
1660 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00001661#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001662 switch (len) {
1663 case 1:
1664 stb_p(buf, value);
1665 break;
1666 case 2:
1667 stw_p(buf, value);
1668 break;
1669 case 4:
1670 stl_p(buf, value);
1671 break;
1672 default:
1673 abort();
1674 }
1675 address_space_write(subpage->as, addr + subpage->base, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00001676}
1677
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001678static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08001679 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001680{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001681 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001682#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001683 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001684 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001685#endif
1686
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001687 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08001688 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001689}
1690
Avi Kivity70c68e42012-01-02 12:32:48 +02001691static const MemoryRegionOps subpage_ops = {
1692 .read = subpage_read,
1693 .write = subpage_write,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001694 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02001695 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00001696};
1697
Anthony Liguoric227f092009-10-01 16:12:16 -05001698static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02001699 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00001700{
1701 int idx, eidx;
1702
1703 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1704 return -1;
1705 idx = SUBPAGE_IDX(start);
1706 eidx = SUBPAGE_IDX(end);
1707#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001708 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
1709 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00001710#endif
blueswir1db7b5422007-05-26 17:36:03 +00001711 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02001712 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00001713 }
1714
1715 return 0;
1716}
1717
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001718static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00001719{
Anthony Liguoric227f092009-10-01 16:12:16 -05001720 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00001721
Anthony Liguori7267c092011-08-20 22:09:37 -05001722 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00001723
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001724 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00001725 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001726 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Avi Kivity70c68e42012-01-02 12:32:48 +02001727 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02001728 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00001729#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001730 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1731 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00001732#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02001733 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00001734
1735 return mmio;
1736}
1737
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001738static uint16_t dummy_section(PhysPageMap *map, MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02001739{
1740 MemoryRegionSection section = {
Edgar E. Iglesias3be91e82013-11-07 18:42:51 +01001741 .address_space = &address_space_memory,
Avi Kivity5312bd82012-02-12 18:32:55 +02001742 .mr = mr,
1743 .offset_within_address_space = 0,
1744 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001745 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02001746 };
1747
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001748 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02001749}
1750
Edgar E. Iglesias77717092013-11-07 19:55:56 +01001751MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02001752{
Edgar E. Iglesias77717092013-11-07 19:55:56 +01001753 return as->dispatch->map.sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02001754}
1755
Avi Kivitye9179ce2009-06-14 11:38:52 +03001756static void io_mem_init(void)
1757{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001758 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
1759 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001760 "unassigned", UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001761 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001762 "notdirty", UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001763 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001764 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03001765}
1766
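/* Listener callbacks that rebuild an address space's dispatch table. A new
 * AddressSpaceDispatch is built in mem_begin() with the fixed dummy
 * sections (their order must match the PHYS_SECTION_* constants, as the
 * asserts check) and swapped in by mem_commit(), which frees the old one. */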
Avi Kivityac1970f2012-10-03 16:22:53 +02001767static void mem_begin(MemoryListener *listener)
1768{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001769 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001770 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
1771 uint16_t n;
1772
1773 n = dummy_section(&d->map, &io_mem_unassigned);
1774 assert(n == PHYS_SECTION_UNASSIGNED);
1775 n = dummy_section(&d->map, &io_mem_notdirty);
1776 assert(n == PHYS_SECTION_NOTDIRTY);
1777 n = dummy_section(&d->map, &io_mem_rom);
1778 assert(n == PHYS_SECTION_ROM);
1779 n = dummy_section(&d->map, &io_mem_watch);
1780 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02001781
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02001782 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02001783 d->as = as;
1784 as->next_dispatch = d;
1785}
1786
1787static void mem_commit(MemoryListener *listener)
1788{
1789 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02001790 AddressSpaceDispatch *cur = as->dispatch;
1791 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02001792
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001793 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02001794
Paolo Bonzini0475d942013-05-29 12:28:21 +02001795 as->dispatch = next;
Avi Kivityac1970f2012-10-03 16:22:53 +02001796
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001797 if (cur) {
1798 phys_sections_free(&cur->map);
1799 g_free(cur);
1800 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001801}
1802
Avi Kivity1d711482012-10-02 18:54:45 +02001803static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02001804{
Andreas Färber182735e2013-05-29 22:29:20 +02001805 CPUState *cpu;
Avi Kivity117712c2012-02-12 21:23:17 +02001806
1807 /* since each CPU stores ram addresses in its TLB cache, we must
1808 reset the modified entries */
1809 /* XXX: slow ! */
Andreas Färberbdc44642013-06-24 23:50:24 +02001810 CPU_FOREACH(cpu) {
Andreas Färber182735e2013-05-29 22:29:20 +02001811 CPUArchState *env = cpu->env_ptr;
1812
Avi Kivity117712c2012-02-12 21:23:17 +02001813 tlb_flush(env, 1);
1814 }
Avi Kivity50c1e142012-02-08 21:36:02 +02001815}
1816
Avi Kivity93632742012-02-08 16:54:16 +02001817static void core_log_global_start(MemoryListener *listener)
1818{
Juan Quintela981fdf22013-10-10 11:54:09 +02001819 cpu_physical_memory_set_dirty_tracking(true);
Avi Kivity93632742012-02-08 16:54:16 +02001820}
1821
1822static void core_log_global_stop(MemoryListener *listener)
1823{
Juan Quintela981fdf22013-10-10 11:54:09 +02001824 cpu_physical_memory_set_dirty_tracking(false);
Avi Kivity93632742012-02-08 16:54:16 +02001825}
1826
Avi Kivity93632742012-02-08 16:54:16 +02001827static MemoryListener core_memory_listener = {
Avi Kivity93632742012-02-08 16:54:16 +02001828 .log_global_start = core_log_global_start,
1829 .log_global_stop = core_log_global_stop,
Avi Kivityac1970f2012-10-03 16:22:53 +02001830 .priority = 1,
Avi Kivity93632742012-02-08 16:54:16 +02001831};
1832
Avi Kivity1d711482012-10-02 18:54:45 +02001833static MemoryListener tcg_memory_listener = {
1834 .commit = tcg_commit,
1835};
1836
Avi Kivityac1970f2012-10-03 16:22:53 +02001837void address_space_init_dispatch(AddressSpace *as)
1838{
Paolo Bonzini00752702013-05-29 12:13:54 +02001839 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001840 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02001841 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02001842 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02001843 .region_add = mem_add,
1844 .region_nop = mem_add,
1845 .priority = 0,
1846 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001847 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02001848}
1849
Avi Kivity83f3c252012-10-07 12:59:55 +02001850void address_space_destroy_dispatch(AddressSpace *as)
1851{
1852 AddressSpaceDispatch *d = as->dispatch;
1853
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001854 memory_listener_unregister(&as->dispatch_listener);
Avi Kivity83f3c252012-10-07 12:59:55 +02001855 g_free(d);
1856 as->dispatch = NULL;
1857}
1858
Avi Kivity62152b82011-07-26 14:26:14 +03001859static void memory_map_init(void)
1860{
Anthony Liguori7267c092011-08-20 22:09:37 -05001861 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01001862
Paolo Bonzini57271d62013-11-07 17:14:37 +01001863 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00001864 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03001865
Anthony Liguori7267c092011-08-20 22:09:37 -05001866 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02001867 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
1868 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00001869 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity93632742012-02-08 16:54:16 +02001870
Avi Kivityf6790af2012-10-02 20:13:51 +02001871 memory_listener_register(&core_memory_listener, &address_space_memory);
liguang26416892013-09-04 14:37:33 +08001872 if (tcg_enabled()) {
1873 memory_listener_register(&tcg_memory_listener, &address_space_memory);
1874 }
Avi Kivity62152b82011-07-26 14:26:14 +03001875}
1876
1877MemoryRegion *get_system_memory(void)
1878{
1879 return system_memory;
1880}
1881
Avi Kivity309cb472011-08-08 16:09:03 +03001882MemoryRegion *get_system_io(void)
1883{
1884 return system_io;
1885}
1886
pbrooke2eef172008-06-08 01:09:01 +00001887#endif /* !defined(CONFIG_USER_ONLY) */
1888
bellard13eb76e2004-01-24 15:23:36 +00001889/* physical memory access (slow version, mainly for debug) */
1890#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02001891int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00001892 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00001893{
1894 int l, flags;
1895 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00001896 void * p;
bellard13eb76e2004-01-24 15:23:36 +00001897
1898 while (len > 0) {
1899 page = addr & TARGET_PAGE_MASK;
1900 l = (page + TARGET_PAGE_SIZE) - addr;
1901 if (l > len)
1902 l = len;
1903 flags = page_get_flags(page);
1904 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00001905 return -1;
bellard13eb76e2004-01-24 15:23:36 +00001906 if (is_write) {
1907 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00001908 return -1;
bellard579a97f2007-11-11 14:26:47 +00001909 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001910 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00001911 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001912 memcpy(p, buf, l);
1913 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00001914 } else {
1915 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00001916 return -1;
bellard579a97f2007-11-11 14:26:47 +00001917 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001918 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00001919 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001920 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00001921 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00001922 }
1923 len -= l;
1924 buf += l;
1925 addr += l;
1926 }
Paul Brooka68fe892010-03-01 00:08:59 +00001927 return 0;
bellard13eb76e2004-01-24 15:23:36 +00001928}
bellard8df1cd02005-01-28 22:37:22 +00001929
bellard13eb76e2004-01-24 15:23:36 +00001930#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001931
Avi Kivitya8170e52012-10-23 12:30:10 +02001932static void invalidate_and_set_dirty(hwaddr addr,
1933 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001934{
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02001935 if (cpu_physical_memory_is_clean(addr)) {
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001936 /* invalidate code */
1937 tb_invalidate_phys_page_range(addr, addr + length, 0);
1938 /* set dirty bit */
Juan Quintela52159192013-10-08 12:44:04 +02001939 cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_VGA);
1940 cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001941 }
Anthony PERARDe2269392012-10-03 13:49:22 +00001942 xen_modified_memory(addr, length);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001943}
1944
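/* Clamp an access to what the region can take: respect the region's maximum
 * access size (default 4), shrink to the alignment implied by the address
 * for regions without unaligned support, and round down to a power of two. */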
Richard Henderson23326162013-07-08 14:55:59 -07001945static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02001946{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02001947 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07001948
1949 /* Regions are assumed to support 1-4 byte accesses unless
1950 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07001951 if (access_size_max == 0) {
1952 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02001953 }
Richard Henderson23326162013-07-08 14:55:59 -07001954
1955 /* Bound the maximum access by the alignment of the address. */
1956 if (!mr->ops->impl.unaligned) {
1957 unsigned align_size_max = addr & -addr;
1958 if (align_size_max != 0 && align_size_max < access_size_max) {
1959 access_size_max = align_size_max;
1960 }
1961 }
1962
1963 /* Don't attempt accesses larger than the maximum. */
1964 if (l > access_size_max) {
1965 l = access_size_max;
1966 }
Paolo Bonzini098178f2013-07-29 14:27:39 +02001967 if (l & (l - 1)) {
1968 l = 1 << (qemu_fls(l) - 1);
1969 }
Richard Henderson23326162013-07-08 14:55:59 -07001970
1971 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02001972}
1973
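/* Copy 'len' bytes between 'buf' and guest physical memory at 'addr'. Each
 * iteration translates one chunk: direct RAM is memcpy'd (with dirty
 * tracking on writes), anything else goes through io_mem_read/io_mem_write
 * at the largest size the region accepts. Returns true on any access
 * error. */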
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001974bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02001975 int len, bool is_write)
bellard13eb76e2004-01-24 15:23:36 +00001976{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001977 hwaddr l;
bellard13eb76e2004-01-24 15:23:36 +00001978 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001979 uint64_t val;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001980 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001981 MemoryRegion *mr;
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001982 bool error = false;
ths3b46e622007-09-17 08:09:54 +00001983
bellard13eb76e2004-01-24 15:23:36 +00001984 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001985 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001986 mr = address_space_translate(as, addr, &addr1, &l, is_write);
ths3b46e622007-09-17 08:09:54 +00001987
bellard13eb76e2004-01-24 15:23:36 +00001988 if (is_write) {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001989 if (!memory_access_is_direct(mr, is_write)) {
1990 l = memory_access_size(mr, l, addr1);
Andreas Färber4917cf42013-05-27 05:17:50 +02001991 /* XXX: could force current_cpu to NULL to avoid
bellard6a00d602005-11-21 23:25:50 +00001992 potential bugs */
Richard Henderson23326162013-07-08 14:55:59 -07001993 switch (l) {
1994 case 8:
1995 /* 64 bit write access */
1996 val = ldq_p(buf);
1997 error |= io_mem_write(mr, addr1, val, 8);
1998 break;
1999 case 4:
bellard1c213d12005-09-03 10:49:04 +00002000 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002001 val = ldl_p(buf);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002002 error |= io_mem_write(mr, addr1, val, 4);
Richard Henderson23326162013-07-08 14:55:59 -07002003 break;
2004 case 2:
bellard1c213d12005-09-03 10:49:04 +00002005 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002006 val = lduw_p(buf);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002007 error |= io_mem_write(mr, addr1, val, 2);
Richard Henderson23326162013-07-08 14:55:59 -07002008 break;
2009 case 1:
bellard1c213d12005-09-03 10:49:04 +00002010 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002011 val = ldub_p(buf);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002012 error |= io_mem_write(mr, addr1, val, 1);
Richard Henderson23326162013-07-08 14:55:59 -07002013 break;
2014 default:
2015 abort();
bellard13eb76e2004-01-24 15:23:36 +00002016 }
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02002017 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002018 addr1 += memory_region_get_ram_addr(mr);
bellard13eb76e2004-01-24 15:23:36 +00002019 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002020 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00002021 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002022 invalidate_and_set_dirty(addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002023 }
2024 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002025 if (!memory_access_is_direct(mr, is_write)) {
bellard13eb76e2004-01-24 15:23:36 +00002026 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002027 l = memory_access_size(mr, l, addr1);
Richard Henderson23326162013-07-08 14:55:59 -07002028 switch (l) {
2029 case 8:
2030 /* 64 bit read access */
2031 error |= io_mem_read(mr, addr1, &val, 8);
2032 stq_p(buf, val);
2033 break;
2034 case 4:
bellard13eb76e2004-01-24 15:23:36 +00002035 /* 32 bit read access */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002036 error |= io_mem_read(mr, addr1, &val, 4);
bellardc27004e2005-01-03 23:35:10 +00002037 stl_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002038 break;
2039 case 2:
bellard13eb76e2004-01-24 15:23:36 +00002040 /* 16 bit read access */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002041 error |= io_mem_read(mr, addr1, &val, 2);
bellardc27004e2005-01-03 23:35:10 +00002042 stw_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002043 break;
2044 case 1:
bellard1c213d12005-09-03 10:49:04 +00002045 /* 8 bit read access */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002046 error |= io_mem_read(mr, addr1, &val, 1);
bellardc27004e2005-01-03 23:35:10 +00002047 stb_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002048 break;
2049 default:
2050 abort();
bellard13eb76e2004-01-24 15:23:36 +00002051 }
2052 } else {
2053 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002054 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
Avi Kivityf3705d52012-03-08 16:16:34 +02002055 memcpy(buf, ptr, l);
bellard13eb76e2004-01-24 15:23:36 +00002056 }
2057 }
2058 len -= l;
2059 buf += l;
2060 addr += l;
2061 }
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002062
2063 return error;
bellard13eb76e2004-01-24 15:23:36 +00002064}
bellard8df1cd02005-01-28 22:37:22 +00002065
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002066bool address_space_write(AddressSpace *as, hwaddr addr,
Avi Kivityac1970f2012-10-03 16:22:53 +02002067 const uint8_t *buf, int len)
2068{
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002069 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
Avi Kivityac1970f2012-10-03 16:22:53 +02002070}
2071
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002072bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002073{
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002074 return address_space_rw(as, addr, buf, len, false);
Avi Kivityac1970f2012-10-03 16:22:53 +02002075}
2076
2077
Avi Kivitya8170e52012-10-23 12:30:10 +02002078void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002079 int len, int is_write)
2080{
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002081 address_space_rw(&address_space_memory, addr, buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002082}
2083
Alexander Graf582b55a2013-12-11 14:17:44 +01002084enum write_rom_type {
2085 WRITE_DATA,
2086 FLUSH_CACHE,
2087};
2088
2089static inline void cpu_physical_memory_write_rom_internal(
2090 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
bellardd0ecd2a2006-04-23 17:14:48 +00002091{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002092 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002093 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002094 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002095 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002096
bellardd0ecd2a2006-04-23 17:14:48 +00002097 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002098 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002099 mr = address_space_translate(&address_space_memory,
2100 addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002101
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002102 if (!(memory_region_is_ram(mr) ||
2103 memory_region_is_romd(mr))) {
bellardd0ecd2a2006-04-23 17:14:48 +00002104 /* do nothing */
2105 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002106 addr1 += memory_region_get_ram_addr(mr);
bellardd0ecd2a2006-04-23 17:14:48 +00002107 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002108 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf582b55a2013-12-11 14:17:44 +01002109 switch (type) {
2110 case WRITE_DATA:
2111 memcpy(ptr, buf, l);
2112 invalidate_and_set_dirty(addr1, l);
2113 break;
2114 case FLUSH_CACHE:
2115 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2116 break;
2117 }
bellardd0ecd2a2006-04-23 17:14:48 +00002118 }
2119 len -= l;
2120 buf += l;
2121 addr += l;
2122 }
2123}
2124
Alexander Graf582b55a2013-12-11 14:17:44 +01002125/* used for ROM loading : can write in RAM and ROM */
2126void cpu_physical_memory_write_rom(hwaddr addr,
2127 const uint8_t *buf, int len)
2128{
2129 cpu_physical_memory_write_rom_internal(addr, buf, len, WRITE_DATA);
2130}
2131
2132void cpu_flush_icache_range(hwaddr start, int len)
2133{
2134 /*
2135 * This function should do the same thing as an icache flush that was
2136 * triggered from within the guest. For TCG we are always cache coherent,
2137 * so there is no need to flush anything. For KVM / Xen we need to flush
2138 * the host's instruction cache at least.
2139 */
2140 if (tcg_enabled()) {
2141 return;
2142 }
2143
2144 cpu_physical_memory_write_rom_internal(start, NULL, len, FLUSH_CACHE);
2145}
2146
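/* Bounce buffer used by address_space_map() when the target is not directly
 * addressable RAM: a single, at most page-sized temporary buffer, so only
 * one such mapping can be outstanding; cpu_register_map_client() callbacks
 * fire when it is released. */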
aliguori6d16c2f2009-01-22 16:59:11 +00002147typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002148 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002149 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002150 hwaddr addr;
2151 hwaddr len;
aliguori6d16c2f2009-01-22 16:59:11 +00002152} BounceBuffer;
2153
2154static BounceBuffer bounce;
2155
aliguoriba223c22009-01-22 16:59:16 +00002156typedef struct MapClient {
2157 void *opaque;
2158 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00002159 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002160} MapClient;
2161
Blue Swirl72cf2d42009-09-12 07:36:22 +00002162static QLIST_HEAD(map_client_list, MapClient) map_client_list
2163 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002164
2165void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2166{
Anthony Liguori7267c092011-08-20 22:09:37 -05002167 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00002168
2169 client->opaque = opaque;
2170 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002171 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00002172 return client;
2173}
2174
Blue Swirl8b9c99d2012-10-28 11:04:51 +00002175static void cpu_unregister_map_client(void *_client)
aliguoriba223c22009-01-22 16:59:16 +00002176{
2177 MapClient *client = (MapClient *)_client;
2178
Blue Swirl72cf2d42009-09-12 07:36:22 +00002179 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002180 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002181}
2182
2183static void cpu_notify_map_clients(void)
2184{
2185 MapClient *client;
2186
Blue Swirl72cf2d42009-09-12 07:36:22 +00002187 while (!QLIST_EMPTY(&map_client_list)) {
2188 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002189 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09002190 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00002191 }
2192}
2193
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002194bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2195{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002196 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002197 hwaddr l, xlat;
2198
2199 while (len > 0) {
2200 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002201 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2202 if (!memory_access_is_direct(mr, is_write)) {
2203 l = memory_access_size(mr, l, addr);
2204 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002205 return false;
2206 }
2207 }
2208
2209 len -= l;
2210 addr += l;
2211 }
2212 return true;
2213}
2214
aliguori6d16c2f2009-01-22 16:59:11 +00002215/* Map a physical memory region into a host virtual address.
2216 * May map a subset of the requested range, given by and returned in *plen.
2217 * May return NULL if resources needed to perform the mapping are exhausted.
2218 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002219 * Use cpu_register_map_client() to know when retrying the map operation is
2220 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002221 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002222void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002223 hwaddr addr,
2224 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002225 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002226{
Avi Kivitya8170e52012-10-23 12:30:10 +02002227 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002228 hwaddr done = 0;
2229 hwaddr l, xlat, base;
2230 MemoryRegion *mr, *this_mr;
2231 ram_addr_t raddr;
aliguori6d16c2f2009-01-22 16:59:11 +00002232
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002233 if (len == 0) {
2234 return NULL;
2235 }
aliguori6d16c2f2009-01-22 16:59:11 +00002236
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002237 l = len;
2238 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2239 if (!memory_access_is_direct(mr, is_write)) {
2240 if (bounce.buffer) {
2241 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002242 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002243 /* Avoid unbounded allocations */
2244 l = MIN(l, TARGET_PAGE_SIZE);
2245 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002246 bounce.addr = addr;
2247 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002248
2249 memory_region_ref(mr);
2250 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002251 if (!is_write) {
2252 address_space_read(as, addr, bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002253 }
aliguori6d16c2f2009-01-22 16:59:11 +00002254
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002255 *plen = l;
2256 return bounce.buffer;
2257 }
2258
2259 base = xlat;
2260 raddr = memory_region_get_ram_addr(mr);
2261
2262 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00002263 len -= l;
2264 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002265 done += l;
2266 if (len == 0) {
2267 break;
2268 }
2269
2270 l = len;
2271 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2272 if (this_mr != mr || xlat != base + done) {
2273 break;
2274 }
aliguori6d16c2f2009-01-22 16:59:11 +00002275 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002276
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002277 memory_region_ref(mr);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002278 *plen = done;
2279 return qemu_ram_ptr_length(raddr + base, plen);
aliguori6d16c2f2009-01-22 16:59:11 +00002280}
2281
Avi Kivityac1970f2012-10-03 16:22:53 +02002282/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002283 * Will also mark the memory as dirty if is_write == 1. access_len gives
2284 * the amount of memory that was actually read or written by the caller.
2285 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002286void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2287 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002288{
2289 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002290 MemoryRegion *mr;
2291 ram_addr_t addr1;
2292
2293 mr = qemu_ram_addr_from_host(buffer, &addr1);
2294 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00002295 if (is_write) {
aliguori6d16c2f2009-01-22 16:59:11 +00002296 while (access_len) {
2297 unsigned l;
2298 l = TARGET_PAGE_SIZE;
2299 if (l > access_len)
2300 l = access_len;
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002301 invalidate_and_set_dirty(addr1, l);
aliguori6d16c2f2009-01-22 16:59:11 +00002302 addr1 += l;
2303 access_len -= l;
2304 }
2305 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002306 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002307 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002308 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002309 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00002310 return;
2311 }
2312 if (is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002313 address_space_write(as, bounce.addr, bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002314 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002315 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002316 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002317 memory_region_unref(bounce.mr);
aliguoriba223c22009-01-22 16:59:16 +00002318 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002319}
bellardd0ecd2a2006-04-23 17:14:48 +00002320
Avi Kivitya8170e52012-10-23 12:30:10 +02002321void *cpu_physical_memory_map(hwaddr addr,
2322 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002323 int is_write)
2324{
2325 return address_space_map(&address_space_memory, addr, plen, is_write);
2326}
2327
Avi Kivitya8170e52012-10-23 12:30:10 +02002328void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2329 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002330{
2331 address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2332}
2333
bellard8df1cd02005-01-28 22:37:22 +00002334/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002335static inline uint32_t ldl_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002336 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002337{
bellard8df1cd02005-01-28 22:37:22 +00002338 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002339 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002340 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002341 hwaddr l = 4;
2342 hwaddr addr1;
bellard8df1cd02005-01-28 22:37:22 +00002343
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002344 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2345 false);
2346 if (l < 4 || !memory_access_is_direct(mr, false)) {
bellard8df1cd02005-01-28 22:37:22 +00002347 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002348 io_mem_read(mr, addr1, &val, 4);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002349#if defined(TARGET_WORDS_BIGENDIAN)
2350 if (endian == DEVICE_LITTLE_ENDIAN) {
2351 val = bswap32(val);
2352 }
2353#else
2354 if (endian == DEVICE_BIG_ENDIAN) {
2355 val = bswap32(val);
2356 }
2357#endif
bellard8df1cd02005-01-28 22:37:22 +00002358 } else {
2359 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002360 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002361 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002362 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002363 switch (endian) {
2364 case DEVICE_LITTLE_ENDIAN:
2365 val = ldl_le_p(ptr);
2366 break;
2367 case DEVICE_BIG_ENDIAN:
2368 val = ldl_be_p(ptr);
2369 break;
2370 default:
2371 val = ldl_p(ptr);
2372 break;
2373 }
bellard8df1cd02005-01-28 22:37:22 +00002374 }
2375 return val;
2376}
2377
Avi Kivitya8170e52012-10-23 12:30:10 +02002378uint32_t ldl_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002379{
2380 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2381}
2382
Avi Kivitya8170e52012-10-23 12:30:10 +02002383uint32_t ldl_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002384{
2385 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2386}
2387
Avi Kivitya8170e52012-10-23 12:30:10 +02002388uint32_t ldl_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002389{
2390 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2391}
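
/*
 * Illustrative sketch: a device model whose guest-visible structures are
 * defined as little-endian should use the _le accessors above so results do
 * not depend on the host or target byte order; plain ldl_phys() follows the
 * target's native order.  The descriptor layout here (32-bit flags at offset
 * 0, 64-bit buffer address at offset 8) is hypothetical.
 */
static void example_read_le_descriptor(hwaddr desc, uint32_t *flags,
                                       uint64_t *buf_addr)
{
    *flags = ldl_le_phys(desc + 0);
    *buf_addr = ldq_le_phys(desc + 8);
}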
2392
bellard84b7b8e2005-11-28 21:19:04 +00002393/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002394static inline uint64_t ldq_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002395 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00002396{
bellard84b7b8e2005-11-28 21:19:04 +00002397 uint8_t *ptr;
2398 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002399 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002400 hwaddr l = 8;
2401 hwaddr addr1;
bellard84b7b8e2005-11-28 21:19:04 +00002402
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002403 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2404 false);
2405 if (l < 8 || !memory_access_is_direct(mr, false)) {
bellard84b7b8e2005-11-28 21:19:04 +00002406 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002407 io_mem_read(mr, addr1, &val, 8);
Paolo Bonzini968a5622013-05-24 17:58:37 +02002408#if defined(TARGET_WORDS_BIGENDIAN)
2409 if (endian == DEVICE_LITTLE_ENDIAN) {
2410 val = bswap64(val);
2411 }
2412#else
2413 if (endian == DEVICE_BIG_ENDIAN) {
2414 val = bswap64(val);
2415 }
2416#endif
bellard84b7b8e2005-11-28 21:19:04 +00002417 } else {
2418 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002419 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002420 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002421 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002422 switch (endian) {
2423 case DEVICE_LITTLE_ENDIAN:
2424 val = ldq_le_p(ptr);
2425 break;
2426 case DEVICE_BIG_ENDIAN:
2427 val = ldq_be_p(ptr);
2428 break;
2429 default:
2430 val = ldq_p(ptr);
2431 break;
2432 }
bellard84b7b8e2005-11-28 21:19:04 +00002433 }
2434 return val;
2435}
2436
Avi Kivitya8170e52012-10-23 12:30:10 +02002437uint64_t ldq_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002438{
2439 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2440}
2441
Avi Kivitya8170e52012-10-23 12:30:10 +02002442uint64_t ldq_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002443{
2444 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2445}
2446
Avi Kivitya8170e52012-10-23 12:30:10 +02002447uint64_t ldq_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002448{
2449 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2450}
2451
bellardaab33092005-10-30 20:48:42 +00002452/* XXX: optimize */
Avi Kivitya8170e52012-10-23 12:30:10 +02002453uint32_t ldub_phys(hwaddr addr)
bellardaab33092005-10-30 20:48:42 +00002454{
2455 uint8_t val;
2456 cpu_physical_memory_read(addr, &val, 1);
2457 return val;
2458}
2459
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002460/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002461static inline uint32_t lduw_phys_internal(hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002462 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00002463{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002464 uint8_t *ptr;
2465 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002466 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002467 hwaddr l = 2;
2468 hwaddr addr1;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002469
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002470 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2471 false);
2472 if (l < 2 || !memory_access_is_direct(mr, false)) {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002473 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002474 io_mem_read(mr, addr1, &val, 2);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002475#if defined(TARGET_WORDS_BIGENDIAN)
2476 if (endian == DEVICE_LITTLE_ENDIAN) {
2477 val = bswap16(val);
2478 }
2479#else
2480 if (endian == DEVICE_BIG_ENDIAN) {
2481 val = bswap16(val);
2482 }
2483#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002484 } else {
2485 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002486 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002487 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002488 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002489 switch (endian) {
2490 case DEVICE_LITTLE_ENDIAN:
2491 val = lduw_le_p(ptr);
2492 break;
2493 case DEVICE_BIG_ENDIAN:
2494 val = lduw_be_p(ptr);
2495 break;
2496 default:
2497 val = lduw_p(ptr);
2498 break;
2499 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002500 }
2501 return val;
bellardaab33092005-10-30 20:48:42 +00002502}
2503
Avi Kivitya8170e52012-10-23 12:30:10 +02002504uint32_t lduw_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002505{
2506 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2507}
2508
Avi Kivitya8170e52012-10-23 12:30:10 +02002509uint32_t lduw_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002510{
2511 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2512}
2513
Avi Kivitya8170e52012-10-23 12:30:10 +02002514uint32_t lduw_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002515{
2516 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2517}
2518
bellard8df1cd02005-01-28 22:37:22 +00002519/* warning: addr must be aligned. The ram page is not marked as dirty
2520 and the code inside is not invalidated. This is useful when the dirty
2521 bits are used to track modified PTEs */
Avi Kivitya8170e52012-10-23 12:30:10 +02002522void stl_phys_notdirty(hwaddr addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00002523{
bellard8df1cd02005-01-28 22:37:22 +00002524 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002525 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002526 hwaddr l = 4;
2527 hwaddr addr1;
bellard8df1cd02005-01-28 22:37:22 +00002528
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002529 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2530 true);
2531 if (l < 4 || !memory_access_is_direct(mr, true)) {
2532 io_mem_write(mr, addr1, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00002533 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002534 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00002535 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00002536 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00002537
2538 if (unlikely(in_migration)) {
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02002539 if (cpu_physical_memory_is_clean(addr1)) {
aliguori74576192008-10-06 14:02:03 +00002540 /* invalidate code */
2541 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2542 /* set dirty bit */
Juan Quintela52159192013-10-08 12:44:04 +02002543 cpu_physical_memory_set_dirty_flag(addr1,
2544 DIRTY_MEMORY_MIGRATION);
2545 cpu_physical_memory_set_dirty_flag(addr1, DIRTY_MEMORY_VGA);
aliguori74576192008-10-06 14:02:03 +00002546 }
2547 }
bellard8df1cd02005-01-28 22:37:22 +00002548 }
2549}
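
/*
 * Illustrative sketch of the caller the comment above has in mind: a target
 * MMU helper that sets the accessed bit in a 32-bit guest page-table entry.
 * Using stl_phys_notdirty() keeps QEMU's own bookkeeping write from marking
 * the page dirty or invalidating translated code, so the dirty bits remain
 * usable for detecting modifications of the PTE by the guest itself.  The
 * EXAMPLE_PTE_ACCESSED value and the helper name are hypothetical.
 */
#define EXAMPLE_PTE_ACCESSED 0x20
static void example_set_pte_accessed(hwaddr pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & EXAMPLE_PTE_ACCESSED)) {
        stl_phys_notdirty(pte_addr, pte | EXAMPLE_PTE_ACCESSED);
    }
}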
2550
2551/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002552static inline void stl_phys_internal(hwaddr addr, uint32_t val,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002553 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002554{
bellard8df1cd02005-01-28 22:37:22 +00002555 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002556 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002557 hwaddr l = 4;
2558 hwaddr addr1;
bellard8df1cd02005-01-28 22:37:22 +00002559
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002560 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2561 true);
2562 if (l < 4 || !memory_access_is_direct(mr, true)) {
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002563#if defined(TARGET_WORDS_BIGENDIAN)
2564 if (endian == DEVICE_LITTLE_ENDIAN) {
2565 val = bswap32(val);
2566 }
2567#else
2568 if (endian == DEVICE_BIG_ENDIAN) {
2569 val = bswap32(val);
2570 }
2571#endif
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002572 io_mem_write(mr, addr1, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00002573 } else {
bellard8df1cd02005-01-28 22:37:22 +00002574 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002575 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00002576 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002577 switch (endian) {
2578 case DEVICE_LITTLE_ENDIAN:
2579 stl_le_p(ptr, val);
2580 break;
2581 case DEVICE_BIG_ENDIAN:
2582 stl_be_p(ptr, val);
2583 break;
2584 default:
2585 stl_p(ptr, val);
2586 break;
2587 }
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002588 invalidate_and_set_dirty(addr1, 4);
bellard8df1cd02005-01-28 22:37:22 +00002589 }
2590}
2591
Avi Kivitya8170e52012-10-23 12:30:10 +02002592void stl_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002593{
2594 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2595}
2596
Avi Kivitya8170e52012-10-23 12:30:10 +02002597void stl_le_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002598{
2599 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2600}
2601
Avi Kivitya8170e52012-10-23 12:30:10 +02002602void stl_be_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002603{
2604 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2605}
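
/*
 * Illustrative sketch: unlike stl_phys_notdirty(), the stores above mark the
 * page dirty and invalidate any translated code on it, so they are the right
 * choice for ordinary device writes into guest RAM.  The _le/_be variants fix
 * the in-memory byte order, so a value written with stl_le_phys() reads back
 * unchanged through ldl_le_phys() on any host.  The mailbox address is a
 * hypothetical guest-physical RAM location.
 */
static bool example_mailbox_roundtrip(hwaddr mailbox, uint32_t val)
{
    stl_le_phys(mailbox, val);
    return ldl_le_phys(mailbox) == val;
}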
2606
bellardaab33092005-10-30 20:48:42 +00002607/* XXX: optimize */
Avi Kivitya8170e52012-10-23 12:30:10 +02002608void stb_phys(hwaddr addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00002609{
2610 uint8_t v = val;
2611 cpu_physical_memory_write(addr, &v, 1);
2612}
2613
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002614/* warning: addr must be aligned */
Avi Kivitya8170e52012-10-23 12:30:10 +02002615static inline void stw_phys_internal(hwaddr addr, uint32_t val,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002616 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00002617{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002618 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002619 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002620 hwaddr l = 2;
2621 hwaddr addr1;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002622
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002623 mr = address_space_translate(&address_space_memory, addr, &addr1, &l,
2624 true);
2625 if (l < 2 || !memory_access_is_direct(mr, true)) {
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002626#if defined(TARGET_WORDS_BIGENDIAN)
2627 if (endian == DEVICE_LITTLE_ENDIAN) {
2628 val = bswap16(val);
2629 }
2630#else
2631 if (endian == DEVICE_BIG_ENDIAN) {
2632 val = bswap16(val);
2633 }
2634#endif
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002635 io_mem_write(mr, addr1, val, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002636 } else {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002637 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002638 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002639 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002640 switch (endian) {
2641 case DEVICE_LITTLE_ENDIAN:
2642 stw_le_p(ptr, val);
2643 break;
2644 case DEVICE_BIG_ENDIAN:
2645 stw_be_p(ptr, val);
2646 break;
2647 default:
2648 stw_p(ptr, val);
2649 break;
2650 }
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002651 invalidate_and_set_dirty(addr1, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002652 }
bellardaab33092005-10-30 20:48:42 +00002653}
2654
Avi Kivitya8170e52012-10-23 12:30:10 +02002655void stw_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002656{
2657 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2658}
2659
Avi Kivitya8170e52012-10-23 12:30:10 +02002660void stw_le_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002661{
2662 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2663}
2664
Avi Kivitya8170e52012-10-23 12:30:10 +02002665void stw_be_phys(hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002666{
2667 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2668}
2669
bellardaab33092005-10-30 20:48:42 +00002670/* XXX: optimize */
Avi Kivitya8170e52012-10-23 12:30:10 +02002671void stq_phys(hwaddr addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00002672{
2673 val = tswap64(val);
Stefan Weil71d2b722011-03-26 21:06:56 +01002674 cpu_physical_memory_write(addr, &val, 8);
bellardaab33092005-10-30 20:48:42 +00002675}
2676
Avi Kivitya8170e52012-10-23 12:30:10 +02002677void stq_le_phys(hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002678{
2679 val = cpu_to_le64(val);
2680 cpu_physical_memory_write(addr, &val, 8);
2681}
2682
Avi Kivitya8170e52012-10-23 12:30:10 +02002683void stq_be_phys(hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002684{
2685 val = cpu_to_be64(val);
2686 cpu_physical_memory_write(addr, &val, 8);
2687}
2688
aliguori5e2972f2009-03-28 17:51:36 +00002689/* virtual memory access for debug (includes writing to ROM) */
Andreas Färberf17ec442013-06-29 19:40:58 +02002690int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00002691 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002692{
2693 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02002694 hwaddr phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00002695 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00002696
2697 while (len > 0) {
2698 page = addr & TARGET_PAGE_MASK;
Andreas Färberf17ec442013-06-29 19:40:58 +02002699 phys_addr = cpu_get_phys_page_debug(cpu, page);
bellard13eb76e2004-01-24 15:23:36 +00002700 /* if no physical page mapped, return an error */
2701 if (phys_addr == -1)
2702 return -1;
2703 l = (page + TARGET_PAGE_SIZE) - addr;
2704 if (l > len)
2705 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00002706 phys_addr += (addr & ~TARGET_PAGE_MASK);
aliguori5e2972f2009-03-28 17:51:36 +00002707 if (is_write)
2708 cpu_physical_memory_write_rom(phys_addr, buf, l);
2709 else
aliguori5e2972f2009-03-28 17:51:36 +00002710 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
bellard13eb76e2004-01-24 15:23:36 +00002711 len -= l;
2712 buf += l;
2713 addr += l;
2714 }
2715 return 0;
2716}
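
/*
 * Illustrative sketch: how a debugger-style caller (gdbstub, monitor) might
 * use cpu_memory_rw_debug() to peek at a guest *virtual* address.  The access
 * is translated through the CPU's page tables via cpu_get_phys_page_debug();
 * writes additionally work on ROM.  The helper name is hypothetical.
 */
static bool example_peek_guest_u32(CPUState *cpu, target_ulong vaddr,
                                   uint32_t *val)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(cpu, vaddr, buf, sizeof(buf), 0) < 0) {
        return false;           /* no page mapped at this virtual address */
    }
    *val = ldl_p(buf);          /* interpret in target-native byte order */
    return true;
}
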
Paul Brooka68fe892010-03-01 00:08:59 +00002717#endif
bellard13eb76e2004-01-24 15:23:36 +00002718
Blue Swirl8e4a4242013-01-06 18:30:17 +00002719#if !defined(CONFIG_USER_ONLY)
2720
2721/*
2722 * A helper function for the _utterly broken_ virtio device model to find out if
2723 * it's running on a big endian machine. Don't do this at home kids!
2724 */
2725bool virtio_is_big_endian(void);
2726bool virtio_is_big_endian(void)
2727{
2728#if defined(TARGET_WORDS_BIGENDIAN)
2729 return true;
2730#else
2731 return false;
2732#endif
2733}
2734
2735#endif
2736
Wen Congyang76f35532012-05-07 12:04:18 +08002737#ifndef CONFIG_USER_ONLY
Avi Kivitya8170e52012-10-23 12:30:10 +02002738bool cpu_physical_memory_is_io(hwaddr phys_addr)
Wen Congyang76f35532012-05-07 12:04:18 +08002739{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002740 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002741 hwaddr l = 1;
Wen Congyang76f35532012-05-07 12:04:18 +08002742
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002743 mr = address_space_translate(&address_space_memory,
2744 phys_addr, &phys_addr, &l, false);
Wen Congyang76f35532012-05-07 12:04:18 +08002745
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002746 return !(memory_region_is_ram(mr) ||
2747 memory_region_is_romd(mr));
Wen Congyang76f35532012-05-07 12:04:18 +08002748}
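
/*
 * Illustrative sketch: a typical reason to ask cpu_physical_memory_is_io()
 * is to skip device (MMIO) regions when walking guest-physical space, for
 * example while writing a memory dump, since reading MMIO can have side
 * effects.  The helper name is hypothetical.
 */
static bool example_page_is_dumpable(hwaddr addr)
{
    return !cpu_physical_memory_is_io(addr);
}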
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04002749
2750void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2751{
2752 RAMBlock *block;
2753
2754 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
2755 func(block->host, block->offset, block->length, opaque);
2756 }
2757}
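
/*
 * Illustrative sketch: a callback matching the func(host, offset, length,
 * opaque) invocation above, used to total the size of all RAM blocks much as
 * a migration backend might before registering memory.  Both helper names
 * are hypothetical.
 */
static void example_count_ram_cb(void *host_addr, ram_addr_t offset,
                                 ram_addr_t length, void *opaque)
{
    *(ram_addr_t *)opaque += length;
}

static ram_addr_t example_total_ram(void)
{
    ram_addr_t total = 0;

    qemu_ram_foreach_block(example_count_ram_cb, &total);
    return total;
}
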
Peter Maydellec3f8c92013-06-27 20:53:38 +01002758#endif