/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "qemu/cache-utils.h"

#include "qemu/range.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
static bool in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits to skip to reach the next level (in units of L2_SIZE).
     * 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];

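/*
 * Editor's note (illustrative sketch, not part of the original source):
 * the physical address space is mapped by a radix tree in which each
 * level indexes P_L2_BITS = 9 bits of the page frame number.  With
 * ADDR_SPACE_BITS = 64 and, say, 12-bit target pages, that gives
 *
 *     P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = 6
 *
 * levels.  A PhysPageEntry packs a 6-bit 'skip' (how many levels a walk
 * may jump over; 0 marks a leaf) and a 26-bit 'ptr' (a node index while
 * skip != 0, otherwise a phys_sections index).  Decoding one step, for a
 * hypothetical entry 'e', node array 'nodes' and section table 'sections':
 *
 *     if (e.skip == 0) {
 *         section = &sections[e.ptr];           // leaf: section number
 *     } else {
 *         PhysPageEntry *p = nodes[e.ptr];      // interior: node number
 *         e = p[(index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];
 *     }
 */
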
typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map)
{
    unsigned i;
    uint32_t ret;

    ret = map->nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);
    for (i = 0; i < P_L2_SIZE; ++i) {
        map->nodes[ret][i].skip = 1;
        map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map);
        p = map->nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < P_L2_SIZE; i++) {
                p[i].skip = 0;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = map->nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

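/*
 * Editor's note (illustrative sketch, not part of the original source):
 * registering four pages of section number 5 starting at physical page
 * 0x1000 would look like this, for an already-initialised dispatch 'd':
 *
 *     phys_page_set(d, 0x1000, 4, 5);
 *
 * phys_page_set_level() descends from the top level and, whenever the
 * remaining range covers a whole aligned subtree of 'step' pages, stores
 * the leaf at that level instead of recursing, so large regions do not
 * populate the deeper nodes at all.
 */
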
/* Compact a non-leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

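/*
 * Editor's note (illustrative, not part of the original source):
 * compaction folds chains of single-child interior nodes into one entry
 * with a larger 'skip'.  Schematically, for a lone mapping deep in the
 * tree:
 *
 *     before:  root -skip=1-> node -skip=1-> node -skip=1-> leaf
 *     after:   root -----------skip=3-----------> leaf
 *
 * A later lookup then consumes three levels' worth of index bits in one
 * step, which is why phys_page_find() below decrements its level counter
 * by lp.skip rather than by one.
 */
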
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

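/*
 * Editor's note (illustrative, not part of the original source): a hedged
 * usage sketch, assuming a dispatch 'd' built by the listener machinery:
 *
 *     MemoryRegionSection *s = phys_page_find(d->phys_map, addr,
 *                                             d->map.nodes, d->map.sections);
 *
 * The final range_covers_byte() test exists because of compaction: a
 * compacted entry swallows several index levels, so addresses that were
 * never mapped can still reach a leaf.  Rechecking the section's own
 * bounds sends such addresses to PHYS_SECTION_UNASSIGNED instead of a
 * neighbouring region.
 */
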
bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        len = MIN(page, len);
    }

    *plen = len;
    *xlat = addr;
    return mr;
}

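/*
 * Editor's note (illustrative sketch, not part of the original source):
 * the usual caller pattern.  The loop above resolves any stacked IOMMUs
 * and clamps the length to what one MemoryRegion can serve contiguously:
 *
 *     hwaddr xlat, plen = size;
 *     MemoryRegion *mr = address_space_translate(&address_space_memory,
 *                                                addr, &xlat, &plen, false);
 *     // 'mr' is the target region, 'xlat' the offset inside it; 'plen'
 *     // may have shrunk, so callers iterate until 'size' is consumed.
 */
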
MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
{
    /* We only support one address space per cpu at the moment.  */
    assert(cpu->as == as);

    if (cpu->tcg_as_listener) {
        memory_listener_unregister(cpu->tcg_as_listener);
    } else {
        cpu->tcg_as_listener = g_new0(MemoryListener, 1);
    }
    cpu->tcg_as_listener->commit = tcg_commit;
    memory_listener_register(cpu->tcg_as_listener, as);
}
#endif

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&cpu->breakpoints);
    QTAILQ_INIT(&cpu->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    vaddr len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

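/*
 * Editor's note (illustrative, not part of the original source): len_mask
 * serves both as the alignment check above and as the match mask at hit
 * time.  For example, a 4-byte write watchpoint:
 *
 *     CPUWatchpoint *wp;
 *     cpu_watchpoint_insert(cpu, addr, 4, BP_MEM_WRITE, &wp);
 *     // len_mask == ~3; an access hits when its address, masked with
 *     // len_mask, equals the (aligned) wp->vaddr.
 */
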
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    vaddr len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
#endif
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)block->host + (start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
                                     unsigned client)
{
    if (length == 0)
        return;
    cpu_physical_memory_clear_dirty_range(start, length, client);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }
}

static void cpu_physical_memory_set_dirty_tracking(bool enable)
{
    in_migration = enable;
}

hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - section->address_space->dispatch->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
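
/*
 * Editor's note (illustrative, not part of the original source): the
 * returned iotlb value is an encoding trick.  For RAM it is the
 * page-aligned ram_addr with a small section number (PHYS_SECTION_NOTDIRTY
 * or PHYS_SECTION_ROM) OR-ed into the low bits; for MMIO it is the
 * section's index into map.sections plus the in-page offset.  This is why
 * phys_section_add() below asserts sections_nb < TARGET_PAGE_SIZE: the
 * section number must stay within the sub-page bits.
 */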
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}


static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}

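/*
 * Editor's note (illustrative, not part of the original source): mem_add()
 * splits a section into an unaligned head, whole pages, and an unaligned
 * tail.  For example, a region from 0x0800 to 0x3400 with 4 KiB pages:
 *
 *     [0x0800, 0x1000)  head  -> register_subpage()
 *     [0x1000, 0x3000)  body  -> register_multipage()
 *     [0x3000, 0x3400)  tail  -> register_subpage()
 *
 * Only the sub-page pieces need the indirection through subpage_t; full
 * pages point straight at the section.
 */
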
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC 0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static sigjmp_buf sigjump;

static void sigbus_handler(int signal)
{
    siglongjmp(sigjump, 1);
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        goto error;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        goto error;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        goto error;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        goto error;
    }

    if (mem_prealloc) {
        int ret, i;
        struct sigaction act, oldact;
        sigset_t set, oldset;

        memset(&act, 0, sizeof(act));
        act.sa_handler = &sigbus_handler;
        act.sa_flags = 0;

        ret = sigaction(SIGBUS, &act, &oldact);
        if (ret) {
            perror("file_ram_alloc: failed to install signal handler");
            exit(1);
        }

        /* unblock SIGBUS */
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        pthread_sigmask(SIG_UNBLOCK, &set, &oldset);

        if (sigsetjmp(sigjump, 1)) {
            fprintf(stderr, "file_ram_alloc: failed to preallocate pages\n");
            exit(1);
        }

        /* MAP_POPULATE silently ignores failures */
        for (i = 0; i < (memory/hpagesize); i++) {
            memset(area + (hpagesize*i), 0, 1);
        }

        ret = sigaction(SIGBUS, &oldact, NULL);
        if (ret) {
            perror("file_ram_alloc: failed to reinstall signal handler");
            exit(1);
        }

        pthread_sigmask(SIG_SETMASK, &oldset, NULL);
    }

    block->fd = fd;
    return area;

error:
    if (mem_prealloc) {
        exit(1);
    }
    return NULL;
}
#else
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    fprintf(stderr, "-mem-path not supported on this host\n");
    exit(1);
}
#endif

Alex Williamsond17b5282010-06-25 11:08:38 -06001142static ram_addr_t find_ram_offset(ram_addr_t size)
1143{
Alex Williamson04b16652010-07-02 11:13:17 -06001144 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001145 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001146
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001147 assert(size != 0); /* it would hand out same offset multiple times */
1148
Paolo Bonzinia3161032012-11-14 15:54:48 +01001149 if (QTAILQ_EMPTY(&ram_list.blocks))
Alex Williamson04b16652010-07-02 11:13:17 -06001150 return 0;
1151
Paolo Bonzinia3161032012-11-14 15:54:48 +01001152 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001153 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001154
1155 end = block->offset + block->length;
1156
Paolo Bonzinia3161032012-11-14 15:54:48 +01001157 QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001158 if (next_block->offset >= end) {
1159 next = MIN(next, next_block->offset);
1160 }
1161 }
1162 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001163 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001164 mingap = next - end;
1165 }
1166 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001167
1168 if (offset == RAM_ADDR_MAX) {
1169 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1170 (uint64_t)size);
1171 abort();
1172 }
1173
Alex Williamson04b16652010-07-02 11:13:17 -06001174 return offset;
1175}
1176
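/*
 * Illustrative sketch, not QEMU code: find_ram_offset() above is a
 * best-fit search -- for each block end it finds the closest following
 * block start and keeps the tightest gap that still fits.  The same idea
 * over a plain array of [start, end) intervals (names hypothetical):
 */
static uint64_t example_best_fit(const uint64_t (*iv)[2], int n,
                                 uint64_t size)
{
    uint64_t offset = UINT64_MAX, mingap = UINT64_MAX;
    int i, j;

    for (i = 0; i < n; i++) {
        uint64_t end = iv[i][1], next = UINT64_MAX;

        for (j = 0; j < n; j++) {          /* closest interval above end */
            if (iv[j][0] >= end) {
                next = MIN(next, iv[j][0]);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;                  /* tightest fitting gap so far */
            mingap = next - end;
        }
    }
    return offset;                         /* UINT64_MAX if no gap fits */
}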
Juan Quintela652d7ec2012-07-20 10:37:54 +02001177ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001178{
Alex Williamsond17b5282010-06-25 11:08:38 -06001179 RAMBlock *block;
1180 ram_addr_t last = 0;
1181
Paolo Bonzinia3161032012-11-14 15:54:48 +01001182 QTAILQ_FOREACH(block, &ram_list.blocks, next)
Alex Williamsond17b5282010-06-25 11:08:38 -06001183 last = MAX(last, block->offset + block->length);
1184
1185 return last;
1186}
1187
Jason Baronddb97f12012-08-02 15:44:16 -04001188static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1189{
1190 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001191
1192 /* Use MADV_DONTDUMP if the user doesn't want guest memory in the core dump */
Markus Armbruster2ff3de62013-07-04 15:09:22 +02001193 if (!qemu_opt_get_bool(qemu_get_machine_opts(),
1194 "dump-guest-core", true)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001195 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1196 if (ret) {
1197 perror("qemu_madvise");
1198 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1199 "but dump_guest_core=off specified\n");
1200 }
1201 }
1202}
1203
Avi Kivityc5705a72011-12-20 15:59:12 +02001204void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
Cam Macdonell84b89d72010-07-26 18:10:57 -06001205{
1206 RAMBlock *new_block, *block;
1207
Avi Kivityc5705a72011-12-20 15:59:12 +02001208 new_block = NULL;
Paolo Bonzinia3161032012-11-14 15:54:48 +01001209 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001210 if (block->offset == addr) {
1211 new_block = block;
1212 break;
1213 }
1214 }
1215 assert(new_block);
1216 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001217
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001218 if (dev) {
1219 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001220 if (id) {
1221 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001222 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001223 }
1224 }
1225 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1226
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001227 /* This assumes the iothread lock is taken here too. */
1228 qemu_mutex_lock_ramlist();
Paolo Bonzinia3161032012-11-14 15:54:48 +01001229 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001230 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001231 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1232 new_block->idstr);
1233 abort();
1234 }
1235 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001236 qemu_mutex_unlock_ramlist();
Avi Kivityc5705a72011-12-20 15:59:12 +02001237}
1238
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001239static int memory_try_enable_merging(void *addr, size_t len)
1240{
Markus Armbruster2ff3de62013-07-04 15:09:22 +02001241 if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001242 /* disabled by the user */
1243 return 0;
1244 }
1245
1246 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1247}
1248
Avi Kivityc5705a72011-12-20 15:59:12 +02001249ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1250 MemoryRegion *mr)
1251{
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001252 RAMBlock *block, *new_block;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001253 ram_addr_t old_ram_size, new_ram_size;
1254
1255 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001256
1257 size = TARGET_PAGE_ALIGN(size);
1258 new_block = g_malloc0(sizeof(*new_block));
Markus Armbruster3435f392013-07-31 15:11:07 +02001259 new_block->fd = -1;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001260
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001261 /* This assumes the iothread lock is taken here too. */
1262 qemu_mutex_lock_ramlist();
Avi Kivity7c637362011-12-21 13:09:49 +02001263 new_block->mr = mr;
Jun Nakajima432d2682010-08-31 16:41:25 +01001264 new_block->offset = find_ram_offset(size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001265 if (host) {
1266 new_block->host = host;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001267 new_block->flags |= RAM_PREALLOC_MASK;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001268 } else if (xen_enabled()) {
1269 if (mem_path) {
1270 fprintf(stderr, "-mem-path not supported with Xen\n");
1271 exit(1);
1272 }
1273 xen_ram_alloc(new_block->offset, size, mr);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001274 } else {
1275 if (mem_path) {
Markus Armbrustere1e84ba2013-07-31 15:11:10 +02001276 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1277 /*
1278 * file_ram_alloc() needs to allocate just like
1279 * phys_mem_alloc, but we haven't bothered to provide
1280 * a hook there.
1281 */
1282 fprintf(stderr,
1283 "-mem-path not supported with this accelerator\n");
1284 exit(1);
1285 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001286 new_block->host = file_ram_alloc(new_block, size, mem_path);
Markus Armbruster0628c182013-07-31 15:11:06 +02001287 }
1288 if (!new_block->host) {
Markus Armbruster91138032013-07-31 15:11:08 +02001289 new_block->host = phys_mem_alloc(size);
Markus Armbruster39228252013-07-31 15:11:11 +02001290 if (!new_block->host) {
1291 fprintf(stderr, "Cannot set up guest memory '%s': %s\n",
1292 new_block->mr->name, strerror(errno));
1293 exit(1);
1294 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001295 memory_try_enable_merging(new_block->host, size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001296 }
1297 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001298 new_block->length = size;
1299
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001300 /* Keep the list sorted from biggest to smallest block. */
1301 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1302 if (block->length < new_block->length) {
1303 break;
1304 }
1305 }
1306 if (block) {
1307 QTAILQ_INSERT_BEFORE(block, new_block, next);
1308 } else {
1309 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1310 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001311 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001312
Umesh Deshpandef798b072011-08-18 11:41:17 -07001313 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001314 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001315
Juan Quintela2152f5c2013-10-08 13:52:02 +02001316 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1317
1318 if (new_ram_size > old_ram_size) {
Juan Quintela1ab4c8c2013-10-08 16:14:39 +02001319 int i;
1320 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1321 ram_list.dirty_memory[i] =
1322 bitmap_zero_extend(ram_list.dirty_memory[i],
1323 old_ram_size, new_ram_size);
1324 }
Juan Quintela2152f5c2013-10-08 13:52:02 +02001325 }
Juan Quintela75218e72013-10-08 12:31:54 +02001326 cpu_physical_memory_set_dirty_range(new_block->offset, size);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001327
Jason Baronddb97f12012-08-02 15:44:16 -04001328 qemu_ram_setup_dump(new_block->host, size);
Luiz Capitulinoad0b5322012-10-05 16:47:57 -03001329 qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
Andrea Arcangeli3e469db2013-07-25 12:11:15 +02001330 qemu_madvise(new_block->host, size, QEMU_MADV_DONTFORK);
Jason Baronddb97f12012-08-02 15:44:16 -04001331
Cam Macdonell84b89d72010-07-26 18:10:57 -06001332 if (kvm_enabled())
1333 kvm_setup_guest_memory(new_block->host, size);
1334
1335 return new_block->offset;
1336}
1337
Avi Kivityc5705a72011-12-20 15:59:12 +02001338ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
pbrook94a6b542009-04-11 17:15:54 +00001339{
Avi Kivityc5705a72011-12-20 15:59:12 +02001340 return qemu_ram_alloc_from_ptr(size, NULL, mr);
pbrook94a6b542009-04-11 17:15:54 +00001341}
bellarde9a1ab12007-02-08 23:08:38 +00001342
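/*
 * Illustrative sketch, not QEMU code: callers normally reach
 * qemu_ram_alloc() through the memory API rather than directly; a device
 * would do something like this (names hypothetical):
 */
static void example_init_vram(Object *owner, MemoryRegion *vram,
                              uint64_t vram_size)
{
    /* allocates guest RAM via qemu_ram_alloc() and names it for debug */
    memory_region_init_ram(vram, owner, "example.vram", vram_size);
    vmstate_register_ram_global(vram);     /* make it migratable */
}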
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001343void qemu_ram_free_from_ptr(ram_addr_t addr)
1344{
1345 RAMBlock *block;
1346
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001347 /* This assumes the iothread lock is taken here too. */
1348 qemu_mutex_lock_ramlist();
Paolo Bonzinia3161032012-11-14 15:54:48 +01001349 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001350 if (addr == block->offset) {
Paolo Bonzinia3161032012-11-14 15:54:48 +01001351 QTAILQ_REMOVE(&ram_list.blocks, block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001352 ram_list.mru_block = NULL;
Umesh Deshpandef798b072011-08-18 11:41:17 -07001353 ram_list.version++;
Anthony Liguori7267c092011-08-20 22:09:37 -05001354 g_free(block);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001355 break;
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001356 }
1357 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001358 qemu_mutex_unlock_ramlist();
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001359}
1360
Anthony Liguoric227f092009-10-01 16:12:16 -05001361void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00001362{
Alex Williamson04b16652010-07-02 11:13:17 -06001363 RAMBlock *block;
1364
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001365 /* This assumes the iothread lock is taken here too. */
1366 qemu_mutex_lock_ramlist();
Paolo Bonzinia3161032012-11-14 15:54:48 +01001367 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001368 if (addr == block->offset) {
Paolo Bonzinia3161032012-11-14 15:54:48 +01001369 QTAILQ_REMOVE(&ram_list.blocks, block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001370 ram_list.mru_block = NULL;
Umesh Deshpandef798b072011-08-18 11:41:17 -07001371 ram_list.version++;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001372 if (block->flags & RAM_PREALLOC_MASK) {
1373 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001374 } else if (xen_enabled()) {
1375 xen_invalidate_map_cache_entry(block->host);
Stefan Weil089f3f72013-09-18 07:48:15 +02001376#ifndef _WIN32
Markus Armbruster3435f392013-07-31 15:11:07 +02001377 } else if (block->fd >= 0) {
1378 munmap(block->host, block->length);
1379 close(block->fd);
Stefan Weil089f3f72013-09-18 07:48:15 +02001380#endif
Alex Williamson04b16652010-07-02 11:13:17 -06001381 } else {
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001382 qemu_anon_ram_free(block->host, block->length);
Alex Williamson04b16652010-07-02 11:13:17 -06001383 }
Anthony Liguori7267c092011-08-20 22:09:37 -05001384 g_free(block);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001385 break;
Alex Williamson04b16652010-07-02 11:13:17 -06001386 }
1387 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001388 qemu_mutex_unlock_ramlist();
Alex Williamson04b16652010-07-02 11:13:17 -06001389
bellarde9a1ab12007-02-08 23:08:38 +00001390}
1391
Huang Yingcd19cfa2011-03-02 08:56:19 +01001392#ifndef _WIN32
1393void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1394{
1395 RAMBlock *block;
1396 ram_addr_t offset;
1397 int flags;
1398 void *area, *vaddr;
1399
Paolo Bonzinia3161032012-11-14 15:54:48 +01001400 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001401 offset = addr - block->offset;
1402 if (offset < block->length) {
1403 vaddr = block->host + offset;
1404 if (block->flags & RAM_PREALLOC_MASK) {
1405 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001406 } else if (xen_enabled()) {
1407 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001408 } else {
1409 flags = MAP_FIXED;
1410 munmap(vaddr, length);
Markus Armbruster3435f392013-07-31 15:11:07 +02001411 if (block->fd >= 0) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001412#ifdef MAP_POPULATE
Markus Armbruster3435f392013-07-31 15:11:07 +02001413 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1414 MAP_PRIVATE;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001415#else
Markus Armbruster3435f392013-07-31 15:11:07 +02001416 flags |= MAP_PRIVATE;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001417#endif
Markus Armbruster3435f392013-07-31 15:11:07 +02001418 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1419 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001420 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001421 /*
1422 * Remap needs to match alloc. Accelerators that
1423 * set phys_mem_alloc never remap. If they did,
1424 * we'd need a remap hook here.
1425 */
1426 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1427
Huang Yingcd19cfa2011-03-02 08:56:19 +01001428 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1429 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1430 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001431 }
1432 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001433 fprintf(stderr, "Could not remap addr: "
1434 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001435 length, addr);
1436 exit(1);
1437 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001438 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001439 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001440 }
1441 return;
1442 }
1443 }
1444}
1445#endif /* !_WIN32 */
1446
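/*
 * Illustrative sketch, not QEMU code: the core of qemu_ram_remap() above
 * is replacing a poisoned page in place with mmap(MAP_FIXED), which
 * installs a fresh mapping at the same virtual address:
 */
static int example_replace_page(void *vaddr, size_t length)
{
    void *p;

    munmap(vaddr, length);
    p = mmap(vaddr, length, PROT_READ | PROT_WRITE,
             MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    return p == vaddr ? 0 : -1;    /* MAP_FIXED must land exactly there */
}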
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001447/* Return a host pointer to ram allocated with qemu_ram_alloc.
1448 With the exception of the softmmu code in this file, this should
1449 only be used for local memory (e.g. video ram) that the device owns,
1450 and knows it isn't going to access beyond the end of the block.
1451
1452 It should not be used for general purpose DMA.
1453 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1454 */
1455void *qemu_get_ram_ptr(ram_addr_t addr)
1456{
1457 RAMBlock *block = qemu_get_ram_block(addr);
1458
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001459 if (xen_enabled()) {
1460 /* We need to check if the requested address is in RAM
1461 * because we don't want to map the entire guest memory in QEMU.
1462 * In that case, just map up to the end of the requested page.
1463 */
1464 if (block->offset == 0) {
1465 return xen_map_cache(addr, 0, 0);
1466 } else if (block->host == NULL) {
1467 block->host =
1468 xen_map_cache(block->offset, block->length, 1);
1469 }
1470 }
1471 return block->host + (addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00001472}
1473
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001474/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1475 * but takes a size argument */
Peter Maydellcb85f7a2013-07-08 09:44:04 +01001476static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001477{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001478 if (*size == 0) {
1479 return NULL;
1480 }
Jan Kiszka868bb332011-06-21 22:59:09 +02001481 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001482 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02001483 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001484 RAMBlock *block;
1485
Paolo Bonzinia3161032012-11-14 15:54:48 +01001486 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001487 if (addr - block->offset < block->length) {
1488 if (addr - block->offset + *size > block->length)
1489 *size = block->length - addr + block->offset;
1490 return block->host + (addr - block->offset);
1491 }
1492 }
1493
1494 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1495 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001496 }
1497}
1498
Paolo Bonzini7443b432013-06-03 12:44:02 +02001499/* Some of the softmmu routines need to translate from a host pointer
1500 (typically a TLB entry) back to a ram offset. */
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001501MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00001502{
pbrook94a6b542009-04-11 17:15:54 +00001503 RAMBlock *block;
1504 uint8_t *host = ptr;
1505
Jan Kiszka868bb332011-06-21 22:59:09 +02001506 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001507 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001508 return qemu_get_ram_block(*ram_addr)->mr;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001509 }
1510
Paolo Bonzini23887b72013-05-06 14:28:39 +02001511 block = ram_list.mru_block;
1512 if (block && block->host && host - block->host < block->length) {
1513 goto found;
1514 }
1515
Paolo Bonzinia3161032012-11-14 15:54:48 +01001516 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001517 /* This case occurs when the block is not mapped. */
1518 if (block->host == NULL) {
1519 continue;
1520 }
Alex Williamsonf471a172010-06-11 11:11:42 -06001521 if (host - block->host < block->length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001522 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001523 }
pbrook94a6b542009-04-11 17:15:54 +00001524 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001525
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001526 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001527
1528found:
1529 *ram_addr = block->offset + (host - block->host);
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001530 return block->mr;
Marcelo Tosattie8902612010-10-11 15:31:19 -03001531}
Alex Williamsonf471a172010-06-11 11:11:42 -06001532
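/*
 * Illustrative sketch, not QEMU code: the lookup above is a linear scan
 * fronted by a one-entry most-recently-used cache (ram_list.mru_block),
 * which keeps the common "same block as last time" case O(1):
 */
static RAMBlock *example_mru_lookup(uint8_t *host)
{
    RAMBlock *block = ram_list.mru_block;

    if (block && block->host && host - block->host < block->length) {
        return block;                       /* cache hit, no scan */
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (block->host && host - block->host < block->length) {
            ram_list.mru_block = block;     /* remember for next time */
            return block;
        }
    }
    return NULL;
}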
Avi Kivitya8170e52012-10-23 12:30:10 +02001533static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001534 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001535{
Juan Quintela52159192013-10-08 12:44:04 +02001536 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001537 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00001538 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001539 switch (size) {
1540 case 1:
1541 stb_p(qemu_get_ram_ptr(ram_addr), val);
1542 break;
1543 case 2:
1544 stw_p(qemu_get_ram_ptr(ram_addr), val);
1545 break;
1546 case 4:
1547 stl_p(qemu_get_ram_ptr(ram_addr), val);
1548 break;
1549 default:
1550 abort();
1551 }
Juan Quintela52159192013-10-08 12:44:04 +02001552 cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_MIGRATION);
1553 cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_VGA);
bellardf23db162005-08-21 19:12:28 +00001554 /* we remove the notdirty callback only if the code has been
1555 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02001556 if (!cpu_physical_memory_is_clean(ram_addr)) {
Andreas Färber4917cf42013-05-27 05:17:50 +02001557 CPUArchState *env = current_cpu->env_ptr;
Andreas Färber93afead2013-08-26 03:41:01 +02001558 tlb_set_dirty(env, current_cpu->mem_io_vaddr);
Andreas Färber4917cf42013-05-27 05:17:50 +02001559 }
bellard1ccde1c2004-02-06 19:46:14 +00001560}
1561
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001562static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1563 unsigned size, bool is_write)
1564{
1565 return is_write;
1566}
1567
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001568static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001569 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001570 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001571 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00001572};
1573
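/*
 * Note on the scheme above (descriptive sketch): pages that are still
 * "clean" for some client are mapped through notdirty_mem_ops instead of
 * plain RAM.  The first write lands here, which invalidates any TBs
 * translated from the page, performs the store, sets the migration and
 * VGA dirty bits, and, once the page is dirty for every client, flips the
 * TLB entry back to a normal RAM mapping so later writes take the fast
 * path.
 */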
pbrook0f459d12008-06-09 00:20:13 +00001574/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00001575static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00001576{
Andreas Färber93afead2013-08-26 03:41:01 +02001577 CPUState *cpu = current_cpu;
1578 CPUArchState *env = cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00001579 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00001580 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00001581 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00001582 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00001583
Andreas Färberff4700b2013-08-26 18:23:18 +02001584 if (cpu->watchpoint_hit) {
aliguori06d55cc2008-11-18 20:24:06 +00001585 /* We re-entered the check after replacing the TB. Now raise
1586 * the debug interrupt so that it will trigger after the
1587 * current instruction. */
Andreas Färber93afead2013-08-26 03:41:01 +02001588 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00001589 return;
1590 }
Andreas Färber93afead2013-08-26 03:41:01 +02001591 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Andreas Färberff4700b2013-08-26 18:23:18 +02001592 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001593 if ((vaddr == (wp->vaddr & len_mask) ||
1594 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00001595 wp->flags |= BP_WATCHPOINT_HIT;
Andreas Färberff4700b2013-08-26 18:23:18 +02001596 if (!cpu->watchpoint_hit) {
1597 cpu->watchpoint_hit = wp;
Andreas Färber239c51a2013-09-01 17:12:23 +02001598 tb_check_watchpoint(cpu);
aliguori6e140f22008-11-18 20:37:55 +00001599 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
Andreas Färber27103422013-08-26 08:31:06 +02001600 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +02001601 cpu_loop_exit(cpu);
aliguori6e140f22008-11-18 20:37:55 +00001602 } else {
1603 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
Andreas Färber648f0342013-09-01 17:43:17 +02001604 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
Andreas Färber0ea8cb82013-09-03 02:12:23 +02001605 cpu_resume_from_signal(cpu, NULL);
aliguori6e140f22008-11-18 20:37:55 +00001606 }
aliguori06d55cc2008-11-18 20:24:06 +00001607 }
aliguori6e140f22008-11-18 20:37:55 +00001608 } else {
1609 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00001610 }
1611 }
1612}
1613
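/*
 * Illustrative sketch, not QEMU code: both ranges involved above are
 * power-of-two sized and naturally aligned, so the overlap test reduces
 * to two mask compares -- "the access window covers the watchpoint start"
 * or "the watchpoint window covers the access address":
 */
static bool example_watch_hit(uint64_t vaddr, uint64_t access_len_mask,
                              uint64_t wp_vaddr, uint64_t wp_len_mask)
{
    /* each mask is ~(len - 1) for the respective power-of-two length */
    return vaddr == (wp_vaddr & access_len_mask) ||
           (vaddr & wp_len_mask) == wp_vaddr;
}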
pbrook6658ffb2007-03-16 23:58:11 +00001614/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1615 so these check for a hit then pass through to the normal out-of-line
1616 phys routines. */
Avi Kivitya8170e52012-10-23 12:30:10 +02001617static uint64_t watch_mem_read(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001618 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001619{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001620 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1621 switch (size) {
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10001622 case 1: return ldub_phys(&address_space_memory, addr);
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10001623 case 2: return lduw_phys(&address_space_memory, addr);
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01001624 case 4: return ldl_phys(&address_space_memory, addr);
Avi Kivity1ec9b902012-01-02 12:47:48 +02001625 default: abort();
1626 }
pbrook6658ffb2007-03-16 23:58:11 +00001627}
1628
Avi Kivitya8170e52012-10-23 12:30:10 +02001629static void watch_mem_write(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001630 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001631{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001632 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1633 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04001634 case 1:
Edgar E. Iglesiasdb3be602013-12-17 15:29:06 +10001635 stb_phys(&address_space_memory, addr, val);
Max Filippov67364152012-01-29 00:01:40 +04001636 break;
1637 case 2:
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10001638 stw_phys(&address_space_memory, addr, val);
Max Filippov67364152012-01-29 00:01:40 +04001639 break;
1640 case 4:
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10001641 stl_phys(&address_space_memory, addr, val);
Max Filippov67364152012-01-29 00:01:40 +04001642 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02001643 default: abort();
1644 }
pbrook6658ffb2007-03-16 23:58:11 +00001645}
1646
Avi Kivity1ec9b902012-01-02 12:47:48 +02001647static const MemoryRegionOps watch_mem_ops = {
1648 .read = watch_mem_read,
1649 .write = watch_mem_write,
1650 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00001651};
pbrook6658ffb2007-03-16 23:58:11 +00001652
Avi Kivitya8170e52012-10-23 12:30:10 +02001653static uint64_t subpage_read(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001654 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001655{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001656 subpage_t *subpage = opaque;
1657 uint8_t buf[4];
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001658
blueswir1db7b5422007-05-26 17:36:03 +00001659#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001660 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001661 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00001662#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001663 address_space_read(subpage->as, addr + subpage->base, buf, len);
1664 switch (len) {
1665 case 1:
1666 return ldub_p(buf);
1667 case 2:
1668 return lduw_p(buf);
1669 case 4:
1670 return ldl_p(buf);
1671 default:
1672 abort();
1673 }
blueswir1db7b5422007-05-26 17:36:03 +00001674}
1675
Avi Kivitya8170e52012-10-23 12:30:10 +02001676static void subpage_write(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001677 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001678{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001679 subpage_t *subpage = opaque;
1680 uint8_t buf[4];
1681
blueswir1db7b5422007-05-26 17:36:03 +00001682#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001683 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001684 " value %"PRIx64"\n",
1685 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00001686#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001687 switch (len) {
1688 case 1:
1689 stb_p(buf, value);
1690 break;
1691 case 2:
1692 stw_p(buf, value);
1693 break;
1694 case 4:
1695 stl_p(buf, value);
1696 break;
1697 default:
1698 abort();
1699 }
1700 address_space_write(subpage->as, addr + subpage->base, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00001701}
1702
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001703static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08001704 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001705{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001706 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001707#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001708 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001709 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001710#endif
1711
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001712 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08001713 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001714}
1715
Avi Kivity70c68e42012-01-02 12:32:48 +02001716static const MemoryRegionOps subpage_ops = {
1717 .read = subpage_read,
1718 .write = subpage_write,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001719 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02001720 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00001721};
1722
Anthony Liguoric227f092009-10-01 16:12:16 -05001723static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02001724 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00001725{
1726 int idx, eidx;
1727
1728 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1729 return -1;
1730 idx = SUBPAGE_IDX(start);
1731 eidx = SUBPAGE_IDX(end);
1732#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001733 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
1734 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00001735#endif
blueswir1db7b5422007-05-26 17:36:03 +00001736 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02001737 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00001738 }
1739
1740 return 0;
1741}
1742
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001743static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00001744{
Anthony Liguoric227f092009-10-01 16:12:16 -05001745 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00001746
Anthony Liguori7267c092011-08-20 22:09:37 -05001747 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00001748
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001749 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00001750 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001751 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Avi Kivity70c68e42012-01-02 12:32:48 +02001752 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02001753 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00001754#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001755 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1756 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00001757#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02001758 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00001759
1760 return mmio;
1761}
1762
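/*
 * Illustrative sketch, not QEMU code: the phys map works at
 * TARGET_PAGE_SIZE granularity, so when two sections share one page the
 * page entry points at a subpage_t and the bytes are split like this
 * (section numbers hypothetical):
 */
static void example_split_page(AddressSpace *as, hwaddr base,
                               uint16_t first_half, uint16_t second_half)
{
    subpage_t *mmio = subpage_init(as, base);

    subpage_register(mmio, 0, TARGET_PAGE_SIZE / 2 - 1, first_half);
    subpage_register(mmio, TARGET_PAGE_SIZE / 2, TARGET_PAGE_SIZE - 1,
                     second_half);
}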
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001763static uint16_t dummy_section(PhysPageMap *map, MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02001764{
1765 MemoryRegionSection section = {
Edgar E. Iglesias3be91e82013-11-07 18:42:51 +01001766 .address_space = &address_space_memory,
Avi Kivity5312bd82012-02-12 18:32:55 +02001767 .mr = mr,
1768 .offset_within_address_space = 0,
1769 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001770 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02001771 };
1772
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001773 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02001774}
1775
Edgar E. Iglesias77717092013-11-07 19:55:56 +01001776MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02001777{
Edgar E. Iglesias77717092013-11-07 19:55:56 +01001778 return as->dispatch->map.sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02001779}
1780
Avi Kivitye9179ce2009-06-14 11:38:52 +03001781static void io_mem_init(void)
1782{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001783 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
1784 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001785 "unassigned", UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001786 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001787 "notdirty", UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001788 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001789 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03001790}
1791
Avi Kivityac1970f2012-10-03 16:22:53 +02001792static void mem_begin(MemoryListener *listener)
1793{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001794 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001795 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
1796 uint16_t n;
1797
1798 n = dummy_section(&d->map, &io_mem_unassigned);
1799 assert(n == PHYS_SECTION_UNASSIGNED);
1800 n = dummy_section(&d->map, &io_mem_notdirty);
1801 assert(n == PHYS_SECTION_NOTDIRTY);
1802 n = dummy_section(&d->map, &io_mem_rom);
1803 assert(n == PHYS_SECTION_ROM);
1804 n = dummy_section(&d->map, &io_mem_watch);
1805 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02001806
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02001807 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02001808 d->as = as;
1809 as->next_dispatch = d;
1810}
1811
1812static void mem_commit(MemoryListener *listener)
1813{
1814 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02001815 AddressSpaceDispatch *cur = as->dispatch;
1816 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02001817
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001818 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02001819
Paolo Bonzini0475d942013-05-29 12:28:21 +02001820 as->dispatch = next;
Avi Kivityac1970f2012-10-03 16:22:53 +02001821
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001822 if (cur) {
1823 phys_sections_free(&cur->map);
1824 g_free(cur);
1825 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001826}
1827
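/*
 * Note on the begin/commit pair above (descriptive sketch): mem_begin()
 * builds a complete new AddressSpaceDispatch off to the side in
 * as->next_dispatch while the old table stays live; mem_commit() then
 * flips as->dispatch to the new table and frees the old one, so lookups
 * always see either the whole old map or the whole new map, never a
 * half-built one.
 */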
Avi Kivity1d711482012-10-02 18:54:45 +02001828static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02001829{
Andreas Färber182735e2013-05-29 22:29:20 +02001830 CPUState *cpu;
Avi Kivity117712c2012-02-12 21:23:17 +02001831
1832 /* since each CPU stores ram addresses in its TLB cache, we must
1833 reset the modified entries */
1834 /* XXX: slow ! */
Andreas Färberbdc44642013-06-24 23:50:24 +02001835 CPU_FOREACH(cpu) {
Edgar E. Iglesias33bde2e2013-11-21 19:06:30 +01001836 /* FIXME: Disentangle the cpu.h circular file dependencies so we can
1837 directly get the right CPU from the listener. */
1838 if (cpu->tcg_as_listener != listener) {
1839 continue;
1840 }
Andreas Färber00c8cb02013-09-04 02:19:44 +02001841 tlb_flush(cpu, 1);
Avi Kivity117712c2012-02-12 21:23:17 +02001842 }
Avi Kivity50c1e142012-02-08 21:36:02 +02001843}
1844
Avi Kivity93632742012-02-08 16:54:16 +02001845static void core_log_global_start(MemoryListener *listener)
1846{
Juan Quintela981fdf22013-10-10 11:54:09 +02001847 cpu_physical_memory_set_dirty_tracking(true);
Avi Kivity93632742012-02-08 16:54:16 +02001848}
1849
1850static void core_log_global_stop(MemoryListener *listener)
1851{
Juan Quintela981fdf22013-10-10 11:54:09 +02001852 cpu_physical_memory_set_dirty_tracking(false);
Avi Kivity93632742012-02-08 16:54:16 +02001853}
1854
Avi Kivity93632742012-02-08 16:54:16 +02001855static MemoryListener core_memory_listener = {
Avi Kivity93632742012-02-08 16:54:16 +02001856 .log_global_start = core_log_global_start,
1857 .log_global_stop = core_log_global_stop,
Avi Kivityac1970f2012-10-03 16:22:53 +02001858 .priority = 1,
Avi Kivity93632742012-02-08 16:54:16 +02001859};
1860
Avi Kivityac1970f2012-10-03 16:22:53 +02001861void address_space_init_dispatch(AddressSpace *as)
1862{
Paolo Bonzini00752702013-05-29 12:13:54 +02001863 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001864 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02001865 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02001866 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02001867 .region_add = mem_add,
1868 .region_nop = mem_add,
1869 .priority = 0,
1870 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001871 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02001872}
1873
Avi Kivity83f3c252012-10-07 12:59:55 +02001874void address_space_destroy_dispatch(AddressSpace *as)
1875{
1876 AddressSpaceDispatch *d = as->dispatch;
1877
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001878 memory_listener_unregister(&as->dispatch_listener);
Avi Kivity83f3c252012-10-07 12:59:55 +02001879 g_free(d);
1880 as->dispatch = NULL;
1881}
1882
Avi Kivity62152b82011-07-26 14:26:14 +03001883static void memory_map_init(void)
1884{
Anthony Liguori7267c092011-08-20 22:09:37 -05001885 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01001886
Paolo Bonzini57271d62013-11-07 17:14:37 +01001887 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00001888 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03001889
Anthony Liguori7267c092011-08-20 22:09:37 -05001890 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02001891 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
1892 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00001893 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity93632742012-02-08 16:54:16 +02001894
Avi Kivityf6790af2012-10-02 20:13:51 +02001895 memory_listener_register(&core_memory_listener, &address_space_memory);
Avi Kivity62152b82011-07-26 14:26:14 +03001896}
1897
1898MemoryRegion *get_system_memory(void)
1899{
1900 return system_memory;
1901}
1902
Avi Kivity309cb472011-08-08 16:09:03 +03001903MemoryRegion *get_system_io(void)
1904{
1905 return system_io;
1906}
1907
pbrooke2eef172008-06-08 01:09:01 +00001908#endif /* !defined(CONFIG_USER_ONLY) */
1909
bellard13eb76e2004-01-24 15:23:36 +00001910/* physical memory access (slow version, mainly for debug) */
1911#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02001912int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00001913 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00001914{
1915 int l, flags;
1916 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00001917 void * p;
bellard13eb76e2004-01-24 15:23:36 +00001918
1919 while (len > 0) {
1920 page = addr & TARGET_PAGE_MASK;
1921 l = (page + TARGET_PAGE_SIZE) - addr;
1922 if (l > len)
1923 l = len;
1924 flags = page_get_flags(page);
1925 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00001926 return -1;
bellard13eb76e2004-01-24 15:23:36 +00001927 if (is_write) {
1928 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00001929 return -1;
bellard579a97f2007-11-11 14:26:47 +00001930 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001931 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00001932 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001933 memcpy(p, buf, l);
1934 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00001935 } else {
1936 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00001937 return -1;
bellard579a97f2007-11-11 14:26:47 +00001938 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001939 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00001940 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001941 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00001942 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00001943 }
1944 len -= l;
1945 buf += l;
1946 addr += l;
1947 }
Paul Brooka68fe892010-03-01 00:08:59 +00001948 return 0;
bellard13eb76e2004-01-24 15:23:36 +00001949}
bellard8df1cd02005-01-28 22:37:22 +00001950
bellard13eb76e2004-01-24 15:23:36 +00001951#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001952
Avi Kivitya8170e52012-10-23 12:30:10 +02001953static void invalidate_and_set_dirty(hwaddr addr,
1954 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001955{
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02001956 if (cpu_physical_memory_is_clean(addr)) {
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001957 /* invalidate code */
1958 tb_invalidate_phys_page_range(addr, addr + length, 0);
1959 /* set dirty bit */
Juan Quintela52159192013-10-08 12:44:04 +02001960 cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_VGA);
1961 cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001962 }
Anthony PERARDe2269392012-10-03 13:49:22 +00001963 xen_modified_memory(addr, length);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001964}
1965
Richard Henderson23326162013-07-08 14:55:59 -07001966static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02001967{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02001968 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07001969
1970 /* Regions are assumed to support 1-4 byte accesses unless
1971 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07001972 if (access_size_max == 0) {
1973 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02001974 }
Richard Henderson23326162013-07-08 14:55:59 -07001975
1976 /* Bound the maximum access by the alignment of the address. */
1977 if (!mr->ops->impl.unaligned) {
1978 unsigned align_size_max = addr & -addr;
1979 if (align_size_max != 0 && align_size_max < access_size_max) {
1980 access_size_max = align_size_max;
1981 }
1982 }
1983
1984 /* Don't attempt accesses larger than the maximum. */
1985 if (l > access_size_max) {
1986 l = access_size_max;
1987 }
Paolo Bonzini098178f2013-07-29 14:27:39 +02001988 if (l & (l - 1)) {
1989 l = 1 << (qemu_fls(l) - 1);
1990 }
Richard Henderson23326162013-07-08 14:55:59 -07001991
1992 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02001993}
1994
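/*
 * Worked example for the "addr & -addr" trick above: in two's complement,
 * addr & -addr isolates the lowest set bit, i.e. the largest power of two
 * dividing addr, which is the natural alignment of the address:
 *
 *     addr = 0x1006:  0x1006 & -0x1006 = 0x2   (at most 2-byte access)
 *     addr = 0x1008:  0x1008 & -0x1008 = 0x8   (up to 8-byte access)
 *     addr = 0x0:     0x0    & -0x0    = 0x0   (no constraint; skipped)
 */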
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001995bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02001996 int len, bool is_write)
bellard13eb76e2004-01-24 15:23:36 +00001997{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001998 hwaddr l;
bellard13eb76e2004-01-24 15:23:36 +00001999 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002000 uint64_t val;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002001 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002002 MemoryRegion *mr;
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002003 bool error = false;
ths3b46e622007-09-17 08:09:54 +00002004
bellard13eb76e2004-01-24 15:23:36 +00002005 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002006 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002007 mr = address_space_translate(as, addr, &addr1, &l, is_write);
ths3b46e622007-09-17 08:09:54 +00002008
bellard13eb76e2004-01-24 15:23:36 +00002009 if (is_write) {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002010 if (!memory_access_is_direct(mr, is_write)) {
2011 l = memory_access_size(mr, l, addr1);
Andreas Färber4917cf42013-05-27 05:17:50 +02002012 /* XXX: could force current_cpu to NULL to avoid
bellard6a00d602005-11-21 23:25:50 +00002013 potential bugs */
Richard Henderson23326162013-07-08 14:55:59 -07002014 switch (l) {
2015 case 8:
2016 /* 64 bit write access */
2017 val = ldq_p(buf);
2018 error |= io_mem_write(mr, addr1, val, 8);
2019 break;
2020 case 4:
bellard1c213d12005-09-03 10:49:04 +00002021 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002022 val = ldl_p(buf);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002023 error |= io_mem_write(mr, addr1, val, 4);
Richard Henderson23326162013-07-08 14:55:59 -07002024 break;
2025 case 2:
bellard1c213d12005-09-03 10:49:04 +00002026 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002027 val = lduw_p(buf);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002028 error |= io_mem_write(mr, addr1, val, 2);
Richard Henderson23326162013-07-08 14:55:59 -07002029 break;
2030 case 1:
bellard1c213d12005-09-03 10:49:04 +00002031 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002032 val = ldub_p(buf);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002033 error |= io_mem_write(mr, addr1, val, 1);
Richard Henderson23326162013-07-08 14:55:59 -07002034 break;
2035 default:
2036 abort();
bellard13eb76e2004-01-24 15:23:36 +00002037 }
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02002038 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002039 addr1 += memory_region_get_ram_addr(mr);
bellard13eb76e2004-01-24 15:23:36 +00002040 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002041 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00002042 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002043 invalidate_and_set_dirty(addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002044 }
2045 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002046 if (!memory_access_is_direct(mr, is_write)) {
bellard13eb76e2004-01-24 15:23:36 +00002047 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002048 l = memory_access_size(mr, l, addr1);
Richard Henderson23326162013-07-08 14:55:59 -07002049 switch (l) {
2050 case 8:
2051 /* 64 bit read access */
2052 error |= io_mem_read(mr, addr1, &val, 8);
2053 stq_p(buf, val);
2054 break;
2055 case 4:
bellard13eb76e2004-01-24 15:23:36 +00002056 /* 32 bit read access */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002057 error |= io_mem_read(mr, addr1, &val, 4);
bellardc27004e2005-01-03 23:35:10 +00002058 stl_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002059 break;
2060 case 2:
bellard13eb76e2004-01-24 15:23:36 +00002061 /* 16 bit read access */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002062 error |= io_mem_read(mr, addr1, &val, 2);
bellardc27004e2005-01-03 23:35:10 +00002063 stw_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002064 break;
2065 case 1:
bellard1c213d12005-09-03 10:49:04 +00002066 /* 8 bit read access */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002067 error |= io_mem_read(mr, addr1, &val, 1);
bellardc27004e2005-01-03 23:35:10 +00002068 stb_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002069 break;
2070 default:
2071 abort();
bellard13eb76e2004-01-24 15:23:36 +00002072 }
2073 } else {
2074 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002075 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
Avi Kivityf3705d52012-03-08 16:16:34 +02002076 memcpy(buf, ptr, l);
bellard13eb76e2004-01-24 15:23:36 +00002077 }
2078 }
2079 len -= l;
2080 buf += l;
2081 addr += l;
2082 }
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002083
2084 return error;
bellard13eb76e2004-01-24 15:23:36 +00002085}
bellard8df1cd02005-01-28 22:37:22 +00002086
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002087bool address_space_write(AddressSpace *as, hwaddr addr,
Avi Kivityac1970f2012-10-03 16:22:53 +02002088 const uint8_t *buf, int len)
2089{
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002090 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
Avi Kivityac1970f2012-10-03 16:22:53 +02002091}
2092
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002093bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002094{
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002095 return address_space_rw(as, addr, buf, len, false);
Avi Kivityac1970f2012-10-03 16:22:53 +02002096}
2097
2098
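/*
 * Illustrative sketch, not QEMU code: device models use these helpers to
 * read guest-physical structures, converting endianness themselves, e.g.
 * fetching a little-endian 32-bit descriptor field (names hypothetical):
 */
static uint32_t example_read_le32(AddressSpace *as, hwaddr addr)
{
    uint8_t buf[4];

    address_space_read(as, addr, buf, sizeof(buf));
    return ldl_le_p(buf);      /* interpret the 4 bytes as little-endian */
}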
Avi Kivitya8170e52012-10-23 12:30:10 +02002099void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002100 int len, int is_write)
2101{
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002102 address_space_rw(&address_space_memory, addr, buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002103}
2104
Alexander Graf582b55a2013-12-11 14:17:44 +01002105enum write_rom_type {
2106 WRITE_DATA,
2107 FLUSH_CACHE,
2108};
2109
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002110static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
Alexander Graf582b55a2013-12-11 14:17:44 +01002111 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
bellardd0ecd2a2006-04-23 17:14:48 +00002112{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002113 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002114 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002115 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002116 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002117
bellardd0ecd2a2006-04-23 17:14:48 +00002118 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002119 l = len;
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002120 mr = address_space_translate(as, addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002121
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002122 if (!(memory_region_is_ram(mr) ||
2123 memory_region_is_romd(mr))) {
bellardd0ecd2a2006-04-23 17:14:48 +00002124 /* do nothing */
2125 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002126 addr1 += memory_region_get_ram_addr(mr);
bellardd0ecd2a2006-04-23 17:14:48 +00002127 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002128 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf582b55a2013-12-11 14:17:44 +01002129 switch (type) {
2130 case WRITE_DATA:
2131 memcpy(ptr, buf, l);
2132 invalidate_and_set_dirty(addr1, l);
2133 break;
2134 case FLUSH_CACHE:
2135 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2136 break;
2137 }
bellardd0ecd2a2006-04-23 17:14:48 +00002138 }
2139 len -= l;
2140 buf += l;
2141 addr += l;
2142 }
2143}
2144
Alexander Graf582b55a2013-12-11 14:17:44 +01002145/* used for ROM loading: can write in RAM and ROM */
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002146void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
Alexander Graf582b55a2013-12-11 14:17:44 +01002147 const uint8_t *buf, int len)
2148{
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002149 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
Alexander Graf582b55a2013-12-11 14:17:44 +01002150}
2151
2152void cpu_flush_icache_range(hwaddr start, int len)
2153{
2154 /*
2155 * This function should do the same thing as an icache flush that was
2156 * triggered from within the guest. For TCG we are always cache coherent,
2157 * so there is no need to flush anything. For KVM / Xen we need to flush
2158 * the host's instruction cache at least.
2159 */
2160 if (tcg_enabled()) {
2161 return;
2162 }
2163
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002164 cpu_physical_memory_write_rom_internal(&address_space_memory,
2165 start, NULL, len, FLUSH_CACHE);
Alexander Graf582b55a2013-12-11 14:17:44 +01002166}
2167
aliguori6d16c2f2009-01-22 16:59:11 +00002168typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002169 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002170 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002171 hwaddr addr;
2172 hwaddr len;
aliguori6d16c2f2009-01-22 16:59:11 +00002173} BounceBuffer;
2174
2175static BounceBuffer bounce;
2176
aliguoriba223c22009-01-22 16:59:16 +00002177typedef struct MapClient {
2178 void *opaque;
2179 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00002180 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002181} MapClient;
2182
Blue Swirl72cf2d42009-09-12 07:36:22 +00002183static QLIST_HEAD(map_client_list, MapClient) map_client_list
2184 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002185
2186void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2187{
Anthony Liguori7267c092011-08-20 22:09:37 -05002188 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00002189
2190 client->opaque = opaque;
2191 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002192 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00002193 return client;
2194}
2195
Blue Swirl8b9c99d2012-10-28 11:04:51 +00002196static void cpu_unregister_map_client(void *_client)
aliguoriba223c22009-01-22 16:59:16 +00002197{
2198 MapClient *client = (MapClient *)_client;
2199
Blue Swirl72cf2d42009-09-12 07:36:22 +00002200 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002201 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002202}
2203
2204static void cpu_notify_map_clients(void)
2205{
2206 MapClient *client;
2207
Blue Swirl72cf2d42009-09-12 07:36:22 +00002208 while (!QLIST_EMPTY(&map_client_list)) {
2209 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002210 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09002211 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00002212 }
2213}
2214
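/*
 * Illustrative usage, not QEMU code: a DMA initiator that finds the
 * bounce buffer busy queues itself for a retry (callback and field names
 * hypothetical):
 *
 *     ptr = address_space_map(as, addr, &len, is_write);
 *     if (!ptr) {
 *         s->map_client = cpu_register_map_client(s, my_dma_retry_cb);
 *         return;     // my_dma_retry_cb() restarts the transfer
 *     }
 *
 * The callback fires from cpu_notify_map_clients() once
 * address_space_unmap() releases the bounce buffer.
 */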
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002215bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2216{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002217 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002218 hwaddr l, xlat;
2219
2220 while (len > 0) {
2221 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002222 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2223 if (!memory_access_is_direct(mr, is_write)) {
2224 l = memory_access_size(mr, l, addr);
2225 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002226 return false;
2227 }
2228 }
2229
2230 len -= l;
2231 addr += l;
2232 }
2233 return true;
2234}
2235
aliguori6d16c2f2009-01-22 16:59:11 +00002236/* Map a physical memory region into a host virtual address.
2237 * May map a subset of the requested range, given by and returned in *plen.
2238 * May return NULL if resources needed to perform the mapping are exhausted.
2239 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002240 * Use cpu_register_map_client() to know when retrying the map operation is
2241 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002242 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002243void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002244 hwaddr addr,
2245 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002246 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002247{
Avi Kivitya8170e52012-10-23 12:30:10 +02002248 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002249 hwaddr done = 0;
2250 hwaddr l, xlat, base;
2251 MemoryRegion *mr, *this_mr;
2252 ram_addr_t raddr;
aliguori6d16c2f2009-01-22 16:59:11 +00002253
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002254 if (len == 0) {
2255 return NULL;
2256 }
aliguori6d16c2f2009-01-22 16:59:11 +00002257
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002258 l = len;
2259 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2260 if (!memory_access_is_direct(mr, is_write)) {
2261 if (bounce.buffer) {
2262 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002263 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002264 /* Avoid unbounded allocations */
2265 l = MIN(l, TARGET_PAGE_SIZE);
2266 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002267 bounce.addr = addr;
2268 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002269
2270 memory_region_ref(mr);
2271 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002272 if (!is_write) {
2273 address_space_read(as, addr, bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002274 }
aliguori6d16c2f2009-01-22 16:59:11 +00002275
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002276 *plen = l;
2277 return bounce.buffer;
2278 }
2279
2280 base = xlat;
2281 raddr = memory_region_get_ram_addr(mr);
2282
2283 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00002284 len -= l;
2285 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002286 done += l;
2287 if (len == 0) {
2288 break;
2289 }
2290
2291 l = len;
2292 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2293 if (this_mr != mr || xlat != base + done) {
2294 break;
2295 }
aliguori6d16c2f2009-01-22 16:59:11 +00002296 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002297
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002298 memory_region_ref(mr);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002299 *plen = done;
2300 return qemu_ram_ptr_length(raddr + base, plen);
aliguori6d16c2f2009-01-22 16:59:11 +00002301}
2302
Avi Kivityac1970f2012-10-03 16:22:53 +02002303/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002304 * Will also mark the memory as dirty if is_write == 1. access_len gives
2305 * the amount of memory that was actually read or written by the caller.
2306 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002307void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2308 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002309{
2310 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002311 MemoryRegion *mr;
2312 ram_addr_t addr1;
2313
2314 mr = qemu_ram_addr_from_host(buffer, &addr1);
2315 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00002316 if (is_write) {
aliguori6d16c2f2009-01-22 16:59:11 +00002317 while (access_len) {
2318 unsigned l;
2319 l = TARGET_PAGE_SIZE;
2320 if (l > access_len)
2321 l = access_len;
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002322 invalidate_and_set_dirty(addr1, l);
aliguori6d16c2f2009-01-22 16:59:11 +00002323 addr1 += l;
2324 access_len -= l;
2325 }
2326 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002327 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002328 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002329 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002330 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00002331 return;
2332 }
2333 if (is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002334 address_space_write(as, bounce.addr, bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002335 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002336 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002337 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002338 memory_region_unref(bounce.mr);
aliguoriba223c22009-01-22 16:59:16 +00002339 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002340}
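
/* Illustrative sketch (not part of this file): the canonical pairing of the
 * two functions above.  Note the post-map length check: address_space_map()
 * may shorten the window, e.g. when it falls back to the single-page bounce
 * buffer, so callers must cope with short maps.  The helper name is an
 * assumption for illustration.
 */
static inline int example_copy_to_guest(AddressSpace *as, hwaddr addr,
                                        const uint8_t *src, hwaddr len)
{
    hwaddr mapped = len;
    void *host = address_space_map(as, addr, &mapped, true);

    if (!host) {
        return -1;           /* try again after cpu_register_map_client() */
    }
    memcpy(host, src, mapped);   /* mapped may be smaller than len */
    address_space_unmap(as, host, mapped, true, mapped);
    return mapped == len ? 0 : -1;
}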
bellardd0ecd2a2006-04-23 17:14:48 +00002341
Avi Kivitya8170e52012-10-23 12:30:10 +02002342void *cpu_physical_memory_map(hwaddr addr,
2343 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002344 int is_write)
2345{
2346 return address_space_map(&address_space_memory, addr, plen, is_write);
2347}
2348
Avi Kivitya8170e52012-10-23 12:30:10 +02002349void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2350 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002351{
2352 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2353}
2354
bellard8df1cd02005-01-28 22:37:22 +00002355/* warning: addr must be aligned */
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002356static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002357 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002358{
bellard8df1cd02005-01-28 22:37:22 +00002359 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002360 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002361 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002362 hwaddr l = 4;
2363 hwaddr addr1;
bellard8df1cd02005-01-28 22:37:22 +00002364
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002365 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002366 if (l < 4 || !memory_access_is_direct(mr, false)) {
bellard8df1cd02005-01-28 22:37:22 +00002367 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002368 io_mem_read(mr, addr1, &val, 4);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002369#if defined(TARGET_WORDS_BIGENDIAN)
2370 if (endian == DEVICE_LITTLE_ENDIAN) {
2371 val = bswap32(val);
2372 }
2373#else
2374 if (endian == DEVICE_BIG_ENDIAN) {
2375 val = bswap32(val);
2376 }
2377#endif
bellard8df1cd02005-01-28 22:37:22 +00002378 } else {
2379 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002380 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002381 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002382 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002383 switch (endian) {
2384 case DEVICE_LITTLE_ENDIAN:
2385 val = ldl_le_p(ptr);
2386 break;
2387 case DEVICE_BIG_ENDIAN:
2388 val = ldl_be_p(ptr);
2389 break;
2390 default:
2391 val = ldl_p(ptr);
2392 break;
2393 }
bellard8df1cd02005-01-28 22:37:22 +00002394 }
2395 return val;
2396}
2397
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002398uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002399{
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002400 return ldl_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002401}
2402
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002403uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002404{
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002405 return ldl_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002406}
2407
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002408uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002409{
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002410 return ldl_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002411}
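
/* Illustrative sketch (not part of this file): choosing a load variant.
 * A guest structure with a fixed little-endian layout should be read with
 * the _le_ form, which byte-swaps as needed so the result is in host order
 * regardless of TARGET_WORDS_BIGENDIAN; plain ldl_phys() is for
 * target-order data such as page table entries.  The helper name is an
 * assumption for illustration.
 */
static inline uint32_t example_read_le32_field(AddressSpace *as, hwaddr addr)
{
    return ldl_le_phys(as, addr);
}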
2412
bellard84b7b8e2005-11-28 21:19:04 +00002413/* warning: addr must be aligned */
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002414static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002415 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00002416{
bellard84b7b8e2005-11-28 21:19:04 +00002417 uint8_t *ptr;
2418 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002419 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002420 hwaddr l = 8;
2421 hwaddr addr1;
bellard84b7b8e2005-11-28 21:19:04 +00002422
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002423 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002424 false);
2425 if (l < 8 || !memory_access_is_direct(mr, false)) {
bellard84b7b8e2005-11-28 21:19:04 +00002426 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002427 io_mem_read(mr, addr1, &val, 8);
Paolo Bonzini968a5622013-05-24 17:58:37 +02002428#if defined(TARGET_WORDS_BIGENDIAN)
2429 if (endian == DEVICE_LITTLE_ENDIAN) {
2430 val = bswap64(val);
2431 }
2432#else
2433 if (endian == DEVICE_BIG_ENDIAN) {
2434 val = bswap64(val);
2435 }
2436#endif
bellard84b7b8e2005-11-28 21:19:04 +00002437 } else {
2438 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002439 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002440 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002441 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002442 switch (endian) {
2443 case DEVICE_LITTLE_ENDIAN:
2444 val = ldq_le_p(ptr);
2445 break;
2446 case DEVICE_BIG_ENDIAN:
2447 val = ldq_be_p(ptr);
2448 break;
2449 default:
2450 val = ldq_p(ptr);
2451 break;
2452 }
bellard84b7b8e2005-11-28 21:19:04 +00002453 }
2454 return val;
2455}
2456
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002457uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002458{
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002459 return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002460}
2461
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002462uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002463{
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002464 return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002465}
2466
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002467uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002468{
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002469 return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002470}
2471
bellardaab33092005-10-30 20:48:42 +00002472/* XXX: optimize */
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002473uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
bellardaab33092005-10-30 20:48:42 +00002474{
2475 uint8_t val;
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002476 address_space_rw(as, addr, &val, 1, 0);
bellardaab33092005-10-30 20:48:42 +00002477 return val;
2478}
2479
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002480/* warning: addr must be aligned */
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10002481static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002482 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00002483{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002484 uint8_t *ptr;
2485 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002486 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002487 hwaddr l = 2;
2488 hwaddr addr1;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002489
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10002490 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002491 false);
2492 if (l < 2 || !memory_access_is_direct(mr, false)) {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002493 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002494 io_mem_read(mr, addr1, &val, 2);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002495#if defined(TARGET_WORDS_BIGENDIAN)
2496 if (endian == DEVICE_LITTLE_ENDIAN) {
2497 val = bswap16(val);
2498 }
2499#else
2500 if (endian == DEVICE_BIG_ENDIAN) {
2501 val = bswap16(val);
2502 }
2503#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002504 } else {
2505 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002506 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002507 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002508 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002509 switch (endian) {
2510 case DEVICE_LITTLE_ENDIAN:
2511 val = lduw_le_p(ptr);
2512 break;
2513 case DEVICE_BIG_ENDIAN:
2514 val = lduw_be_p(ptr);
2515 break;
2516 default:
2517 val = lduw_p(ptr);
2518 break;
2519 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002520 }
2521 return val;
bellardaab33092005-10-30 20:48:42 +00002522}
2523
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10002524uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002525{
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10002526 return lduw_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002527}
2528
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10002529uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002530{
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10002531 return lduw_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002532}
2533
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10002534uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002535{
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10002536 return lduw_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002537}
2538
bellard8df1cd02005-01-28 22:37:22 +00002539/* warning: addr must be aligned. The RAM page is not marked as dirty
2540 and the code inside is not invalidated. This is useful when the dirty
2541 bits are used to track modified PTEs. */
Edgar E. Iglesias2198a122013-11-28 10:13:41 +01002542void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00002543{
bellard8df1cd02005-01-28 22:37:22 +00002544 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002545 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002546 hwaddr l = 4;
2547 hwaddr addr1;
bellard8df1cd02005-01-28 22:37:22 +00002548
Edgar E. Iglesias2198a122013-11-28 10:13:41 +01002549 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002550 true);
2551 if (l < 4 || !memory_access_is_direct(mr, true)) {
2552 io_mem_write(mr, addr1, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00002553 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002554 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00002555 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00002556 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00002557
2558 if (unlikely(in_migration)) {
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02002559 if (cpu_physical_memory_is_clean(addr1)) {
aliguori74576192008-10-06 14:02:03 +00002560 /* invalidate code */
2561 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2562 /* set dirty bit */
Juan Quintela52159192013-10-08 12:44:04 +02002563 cpu_physical_memory_set_dirty_flag(addr1,
2564 DIRTY_MEMORY_MIGRATION);
2565 cpu_physical_memory_set_dirty_flag(addr1, DIRTY_MEMORY_VGA);
aliguori74576192008-10-06 14:02:03 +00002566 }
2567 }
bellard8df1cd02005-01-28 22:37:22 +00002568 }
2569}
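
/* Illustrative sketch (not part of this file): stl_phys_notdirty() exists
 * for target MMU helpers that set accessed/dirty bits inside guest page
 * table entries.  A plain stl_phys() would flag the page for migration and
 * VGA resync even though only bookkeeping bits changed.  The PTE layout and
 * helper name here are assumptions for illustration.
 */
static inline void example_pte_set_accessed(AddressSpace *as, hwaddr pte_addr,
                                            uint32_t pte, uint32_t accessed_bit)
{
    stl_phys_notdirty(as, pte_addr, pte | accessed_bit);
}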
2570
2571/* warning: addr must be aligned */
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10002572static inline void stl_phys_internal(AddressSpace *as,
2573 hwaddr addr, uint32_t val,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002574 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002575{
bellard8df1cd02005-01-28 22:37:22 +00002576 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002577 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002578 hwaddr l = 4;
2579 hwaddr addr1;
bellard8df1cd02005-01-28 22:37:22 +00002580
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10002581 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002582 true);
2583 if (l < 4 || !memory_access_is_direct(mr, true)) {
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002584#if defined(TARGET_WORDS_BIGENDIAN)
2585 if (endian == DEVICE_LITTLE_ENDIAN) {
2586 val = bswap32(val);
2587 }
2588#else
2589 if (endian == DEVICE_BIG_ENDIAN) {
2590 val = bswap32(val);
2591 }
2592#endif
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002593 io_mem_write(mr, addr1, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00002594 } else {
bellard8df1cd02005-01-28 22:37:22 +00002595 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002596 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00002597 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002598 switch (endian) {
2599 case DEVICE_LITTLE_ENDIAN:
2600 stl_le_p(ptr, val);
2601 break;
2602 case DEVICE_BIG_ENDIAN:
2603 stl_be_p(ptr, val);
2604 break;
2605 default:
2606 stl_p(ptr, val);
2607 break;
2608 }
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002609 invalidate_and_set_dirty(addr1, 4);
bellard8df1cd02005-01-28 22:37:22 +00002610 }
2611}
2612
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10002613void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002614{
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10002615 stl_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002616}
2617
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10002618void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002619{
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10002620 stl_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002621}
2622
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10002623void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002624{
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10002625 stl_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002626}
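
/* Illustrative sketch (not part of this file): the store variants mirror
 * the loads.  A device model writing a descriptor field whose layout is
 * defined as big-endian uses the _be_ form and lets the helper swap when
 * needed.  The helper name is an assumption for illustration.
 */
static inline void example_write_be32_field(AddressSpace *as, hwaddr addr,
                                            uint32_t val)
{
    stl_be_phys(as, addr, val);
}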
2627
bellardaab33092005-10-30 20:48:42 +00002628/* XXX: optimize */
Edgar E. Iglesiasdb3be602013-12-17 15:29:06 +10002629void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00002630{
2631 uint8_t v = val;
Edgar E. Iglesiasdb3be602013-12-17 15:29:06 +10002632 address_space_rw(as, addr, &v, 1, 1);
bellardaab33092005-10-30 20:48:42 +00002633}
2634
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002635/* warning: addr must be aligned */
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10002636static inline void stw_phys_internal(AddressSpace *as,
2637 hwaddr addr, uint32_t val,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002638 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00002639{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002640 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002641 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002642 hwaddr l = 2;
2643 hwaddr addr1;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002644
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10002645 mr = address_space_translate(as, addr, &addr1, &l, true);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002646 if (l < 2 || !memory_access_is_direct(mr, true)) {
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002647#if defined(TARGET_WORDS_BIGENDIAN)
2648 if (endian == DEVICE_LITTLE_ENDIAN) {
2649 val = bswap16(val);
2650 }
2651#else
2652 if (endian == DEVICE_BIG_ENDIAN) {
2653 val = bswap16(val);
2654 }
2655#endif
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002656 io_mem_write(mr, addr1, val, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002657 } else {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002658 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002659 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002660 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002661 switch (endian) {
2662 case DEVICE_LITTLE_ENDIAN:
2663 stw_le_p(ptr, val);
2664 break;
2665 case DEVICE_BIG_ENDIAN:
2666 stw_be_p(ptr, val);
2667 break;
2668 default:
2669 stw_p(ptr, val);
2670 break;
2671 }
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002672 invalidate_and_set_dirty(addr1, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002673 }
bellardaab33092005-10-30 20:48:42 +00002674}
2675
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10002676void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002677{
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10002678 stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002679}
2680
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10002681void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002682{
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10002683 stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002684}
2685
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10002686void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002687{
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10002688 stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002689}
2690
bellardaab33092005-10-30 20:48:42 +00002691/* XXX: optimize */
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01002692void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00002693{
2694 val = tswap64(val);
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01002695 address_space_rw(as, addr, (void *) &val, 8, 1);
bellardaab33092005-10-30 20:48:42 +00002696}
2697
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01002698void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002699{
2700 val = cpu_to_le64(val);
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01002701 address_space_rw(as, addr, (void *) &val, 8, 1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002702}
2703
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01002704void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002705{
2706 val = cpu_to_be64(val);
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01002707 address_space_rw(as, addr, (void *) &val, 8, 1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002708}
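
/* Illustrative sketch (not part of this file): note that plain stq_phys()
 * above stores in *target* byte order (hence the tswap64), while the
 * _le/_be forms pin a specific layout.  A 64-bit counter with a fixed
 * little-endian wire format would therefore be written as below; the
 * helper name is an assumption for illustration.
 */
static inline void example_write_le64_counter(AddressSpace *as, hwaddr addr,
                                              uint64_t ticks)
{
    stq_le_phys(as, addr, ticks);
}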
2709
aliguori5e2972f2009-03-28 17:51:36 +00002710/* virtual memory access for debug (includes writing to ROM) */
Andreas Färberf17ec442013-06-29 19:40:58 +02002711int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00002712 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002713{
2714 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02002715 hwaddr phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00002716 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00002717
2718 while (len > 0) {
2719 page = addr & TARGET_PAGE_MASK;
Andreas Färberf17ec442013-06-29 19:40:58 +02002720 phys_addr = cpu_get_phys_page_debug(cpu, page);
bellard13eb76e2004-01-24 15:23:36 +00002721 /* if no physical page mapped, return an error */
2722 if (phys_addr == -1)
2723 return -1;
2724 l = (page + TARGET_PAGE_SIZE) - addr;
2725 if (l > len)
2726 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00002727 phys_addr += (addr & ~TARGET_PAGE_MASK);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10002728 if (is_write) {
2729 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
2730 } else {
2731 address_space_rw(cpu->as, phys_addr, buf, l, 0);
2732 }
bellard13eb76e2004-01-24 15:23:36 +00002733 len -= l;
2734 buf += l;
2735 addr += l;
2736 }
2737 return 0;
2738}
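
/* Illustrative sketch (not part of this file): cpu_memory_rw_debug() is
 * what the gdb stub uses to service memory packets.  It walks guest
 * *virtual* addresses page by page via cpu_get_phys_page_debug(), and on
 * the write path goes through cpu_physical_memory_write_rom() so
 * breakpoints can be planted even in read-only code.  The helper name is
 * an assumption for illustration.
 */
static inline bool example_peek_guest_u32(CPUState *cpu, target_ulong vaddr,
                                          uint32_t *out)
{
    return cpu_memory_rw_debug(cpu, vaddr, (uint8_t *)out,
                               sizeof(*out), 0) == 0;
}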
Paul Brooka68fe892010-03-01 00:08:59 +00002739#endif
bellard13eb76e2004-01-24 15:23:36 +00002740
Blue Swirl8e4a4242013-01-06 18:30:17 +00002741#if !defined(CONFIG_USER_ONLY)
2742
2743/*
2744 * A helper function for the _utterly broken_ virtio device model to find out if
2745 * it's running on a big-endian machine. Don't do this at home, kids!
2746 */
2747bool virtio_is_big_endian(void);
2748bool virtio_is_big_endian(void)
2749{
2750#if defined(TARGET_WORDS_BIGENDIAN)
2751 return true;
2752#else
2753 return false;
2754#endif
2755}
2756
2757#endif
2758
Wen Congyang76f35532012-05-07 12:04:18 +08002759#ifndef CONFIG_USER_ONLY
Avi Kivitya8170e52012-10-23 12:30:10 +02002760bool cpu_physical_memory_is_io(hwaddr phys_addr)
Wen Congyang76f35532012-05-07 12:04:18 +08002761{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002762 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002763 hwaddr l = 1;
Wen Congyang76f35532012-05-07 12:04:18 +08002764
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002765 mr = address_space_translate(&address_space_memory,
2766 phys_addr, &phys_addr, &l, false);
Wen Congyang76f35532012-05-07 12:04:18 +08002767
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002768 return !(memory_region_is_ram(mr) ||
2769 memory_region_is_romd(mr));
Wen Congyang76f35532012-05-07 12:04:18 +08002770}
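
/* Illustrative sketch (not part of this file): dump and introspection code
 * uses cpu_physical_memory_is_io() to skip MMIO ranges, since reading a
 * device register as if it were RAM could trigger side effects in the
 * emulated hardware.  The helper name is an assumption for illustration.
 */
static inline bool example_page_is_dumpable(hwaddr addr)
{
    return !cpu_physical_memory_is_io(addr);
}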
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04002771
2772void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2773{
2774 RAMBlock *block;
2775
2776 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
2777 func(block->host, block->offset, block->length, opaque);
2778 }
2779}
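
/* Illustrative sketch (not part of this file): a RAMBlockIterFunc that
 * tallies total guest RAM.  Users such as the RDMA migration code follow
 * the same pattern to register every block with their transport.  The
 * function names are assumptions for illustration.
 */
static void example_count_ram_cb(void *host_addr, ram_addr_t offset,
                                 ram_addr_t length, void *opaque)
{
    *(uint64_t *)opaque += length;
}

static uint64_t example_count_ram(void)
{
    uint64_t total = 0;

    qemu_ram_foreach_block(example_count_ram_cb, &total);
    return total;
}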
Peter Maydellec3f8c92013-06-27 20:53:38 +01002780#endif