/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "qemu/cache-utils.h"

#include "qemu/range.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
static bool in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits to skip to reach the next level (in units of L2_SIZE).
     * 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

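/*
 * Encoding note for PhysPageEntry above: the 6-bit skip count and the
 * 26-bit ptr share one uint32_t, so the map can reference at most 2^26
 * nodes or sections, and PHYS_MAP_NODE_NIL is the all-ones 26-bit value
 * ((uint32_t)~0 >> 6).  skip == 0 marks a leaf, skip == 1 an ordinary
 * one-level descent, and skip > 1 an entry whose intermediate radix
 * levels were removed by phys_page_compact() below.
 */
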
/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
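/*
 * Worked example: for a target with TARGET_PAGE_BITS == 12 (4 KiB pages),
 * P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = 6, i.e. six 9-bit radix levels
 * cover the 52-bit page-frame-number space.
 */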

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map)
{
    unsigned i;
    uint32_t ret;

    ret = map->nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);
    for (i = 0; i < P_L2_SIZE; ++i) {
        map->nodes[ret][i].skip = 1;
        map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map);
        p = map->nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < P_L2_SIZE; i++) {
                p[i].skip = 0;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = map->nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf,
                        P_L2_LEVELS - 1);
}

/* Compact a non-leaf page entry. Simply detect that the entry has a single
 * child, and update our entry so we can skip it and go directly to the
 * destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes,
                              unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

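/*
 * Worked example for phys_page_compact() above: when an inner node has
 * exactly one populated slot, the parent entry absorbs the child's skip
 * count (lp->skip += child skip) and points straight at the child's
 * target, so later lookups jump over the removed level.  The (1 << 3)
 * bound keeps combined skips below 8 even though the skip field itself
 * is 6 bits wide.
 */
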
static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes,
                                           MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

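/*
 * Lookup note for phys_page_find() above: the walk starts at
 * i = P_L2_LEVELS and subtracts lp.skip per hop, so compacted entries
 * (skip > 1) descend several radix levels in one step.  The final
 * range_covers_byte() check guards compacted leaves that can cover more
 * address space than the section they point to.
 */
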
bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr,
                                 hwaddr *xlat, hwaddr *plen,
                                 bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as->dispatch, addr, &addr,
                                                   plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        len = MIN(page, len);
    }

    *plen = len;
    *xlat = addr;
    return mr;
}

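/*
 * Translation loop note for address_space_translate() above: each
 * iteration resolves addr within the current address space; if the
 * resulting region is an IOMMU, the IOTLB entry rewrites the address,
 * clips the length to the IOMMU page, and redirects the walk into
 * iotlb.target_as.  A permission miss terminates the loop with
 * io_mem_unassigned.  For direct RAM/ROM access the length is finally
 * clipped so an access never crosses a target page.
 */
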
MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as->dispatch, addr, xlat,
                                               plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
{
    /* We only support one address space per cpu at the moment.  */
    assert(cpu->as == as);

    if (cpu->tcg_as_listener) {
        memory_listener_unregister(cpu->tcg_as_listener);
    } else {
        cpu->tcg_as_listener = g_new0(MemoryListener, 1);
    }
    cpu->tcg_as_listener->commit = tcg_commit;
    memory_listener_register(cpu->tcg_as_listener, as);
}
#endif

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&cpu->breakpoints);
    QTAILQ_INIT(&cpu->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    vaddr len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}

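/*
 * Sanity-check example for cpu_watchpoint_insert() above: len must be a
 * power of two no larger than TARGET_PAGE_SIZE, and addr must be aligned
 * to it.  E.g. len == 4 gives len_mask == ~3; addr == 0x1002 fails the
 * (addr & ~len_mask) test, while addr == 0x1004 is accepted.
 */
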
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    vaddr len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
#endif
}

/* Enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction. */
void cpu_single_step(CPUState *cpu, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
#endif
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)block->host + (start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
                                     unsigned client)
{
    if (length == 0) {
        return;
    }
    cpu_physical_memory_clear_dirty_range(start, length, client);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }
}

static void cpu_physical_memory_set_dirty_tracking(bool enable)
{
    in_migration = enable;
}

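/*
 * The iotlb values built below pack a section index into the low bits of
 * a page-aligned address: phys_section_add() (later in this file) asserts
 * that section numbers stay below TARGET_PAGE_SIZE, so for RAM the ram
 * address can be ORed with PHYS_SECTION_NOTDIRTY or PHYS_SECTION_ROM
 * without colliding with the address bits, while for MMIO the value is
 * the section index itself plus the offset within the page.
 */
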
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - section->address_space->dispatch->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* !defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this. Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t))
{
    phys_mem_alloc = alloc;
}

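/*
 * Illustrative use only (not part of this file): an accelerator that
 * needs guest RAM from its own pool could install a hook such as
 *
 *     static void *my_accel_ram_alloc(size_t size)    // hypothetical
 *     {
 *         return my_accel_pool_alloc(size);           // hypothetical
 *     }
 *     ...
 *     phys_mem_set_alloc(my_accel_ram_alloc);
 *
 * Both my_accel_* names are made up for this sketch; the default
 * allocator remains qemu_anon_ram_alloc.
 */
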
static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(AddressSpaceDispatch *d,
                             MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes,
                                                   d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}

static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}

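/*
 * Worked example for mem_add() above: with 4 KiB pages, a section
 * covering [0x1800, 0x3800) is split into a subpage head
 * [0x1800, 0x2000), a page-aligned middle [0x2000, 0x3000) registered
 * via register_multipage(), and a subpage tail [0x3000, 0x3800).
 */
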
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC) {
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
    }

    return fs.f_bsize;
}

static sigjmp_buf sigjump;

static void sigbus_handler(int signal)
{
    siglongjmp(sigjump, 1);
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        goto error;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        goto error;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/') {
            *c = '_';
        }
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        goto error;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory)) {
        perror("ftruncate");
    }

    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        goto error;
    }

    if (mem_prealloc) {
        int ret, i;
        struct sigaction act, oldact;
        sigset_t set, oldset;

        memset(&act, 0, sizeof(act));
        act.sa_handler = &sigbus_handler;
        act.sa_flags = 0;

        ret = sigaction(SIGBUS, &act, &oldact);
        if (ret) {
            perror("file_ram_alloc: failed to install signal handler");
            exit(1);
        }

        /* unblock SIGBUS */
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        pthread_sigmask(SIG_UNBLOCK, &set, &oldset);

        if (sigsetjmp(sigjump, 1)) {
            fprintf(stderr, "file_ram_alloc: failed to preallocate pages\n");
            exit(1);
        }

        /* MAP_POPULATE silently ignores failures */
        for (i = 0; i < (memory / hpagesize); i++) {
            memset(area + (hpagesize * i), 0, 1);
        }

        ret = sigaction(SIGBUS, &oldact, NULL);
        if (ret) {
            perror("file_ram_alloc: failed to reinstall signal handler");
            exit(1);
        }

        pthread_sigmask(SIG_SETMASK, &oldset, NULL);
    }

    block->fd = fd;
    return area;

error:
    if (mem_prealloc) {
        exit(1);
    }
    return NULL;
}
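
/*
 * Preallocation note for file_ram_alloc() above: when memory
 * preallocation is requested, the loop touches one byte per huge page to
 * force allocation (MAP_POPULATE is avoided because it silently ignores
 * failures).  A SIGBUS during the touch, e.g. because the hugepage pool
 * is exhausted, siglongjmp()s back to the sigsetjmp() and exits with a
 * clear error instead of letting the guest die later.
 */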
#else
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    fprintf(stderr, "-mem-path not supported on this host\n");
    exit(1);
}
#endif

Alex Williamsond17b5282010-06-25 11:08:38 -06001143static ram_addr_t find_ram_offset(ram_addr_t size)
1144{
Alex Williamson04b16652010-07-02 11:13:17 -06001145 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001146 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001147
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001148 assert(size != 0); /* it would hand out same offset multiple times */
1149
Paolo Bonzinia3161032012-11-14 15:54:48 +01001150 if (QTAILQ_EMPTY(&ram_list.blocks))
Alex Williamson04b16652010-07-02 11:13:17 -06001151 return 0;
1152
Paolo Bonzinia3161032012-11-14 15:54:48 +01001153 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001154 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001155
1156 end = block->offset + block->length;
1157
Paolo Bonzinia3161032012-11-14 15:54:48 +01001158 QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001159 if (next_block->offset >= end) {
1160 next = MIN(next, next_block->offset);
1161 }
1162 }
1163 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001164 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001165 mingap = next - end;
1166 }
1167 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001168
1169 if (offset == RAM_ADDR_MAX) {
1170 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1171 (uint64_t)size);
1172 abort();
1173 }
1174
Alex Williamson04b16652010-07-02 11:13:17 -06001175 return offset;
1176}
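
/*
 * The same smallest-sufficient-gap search as find_ram_offset() above,
 * restated over a plain array so the O(n^2) scan can be read (and
 * tested) in isolation.  Block and find_gap() are hypothetical names;
 * like the original, gaps below the lowest block are not considered.
 */
#include <stdint.h>

typedef struct {
    uint64_t offset;
    uint64_t length;
} Block;

static uint64_t find_gap(const Block *blocks, int n, uint64_t size)
{
    uint64_t best = UINT64_MAX, mingap = UINT64_MAX;
    int i, j;

    for (i = 0; i < n; i++) {
        uint64_t end = blocks[i].offset + blocks[i].length;
        uint64_t next = UINT64_MAX;

        for (j = 0; j < n; j++) {           /* closest block at or above end */
            if (blocks[j].offset >= end && blocks[j].offset < next) {
                next = blocks[j].offset;
            }
        }
        if (next - end >= size && next - end < mingap) {
            best = end;                     /* smallest gap that still fits */
            mingap = next - end;
        }
    }
    return best;                            /* UINT64_MAX: no gap found */
}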
1177
Juan Quintela652d7ec2012-07-20 10:37:54 +02001178ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001179{
Alex Williamsond17b5282010-06-25 11:08:38 -06001180 RAMBlock *block;
1181 ram_addr_t last = 0;
1182
Paolo Bonzinia3161032012-11-14 15:54:48 +01001183 QTAILQ_FOREACH(block, &ram_list.blocks, next)
Alex Williamsond17b5282010-06-25 11:08:38 -06001184 last = MAX(last, block->offset + block->length);
1185
1186 return last;
1187}
1188
Jason Baronddb97f12012-08-02 15:44:16 -04001189static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1190{
1191 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001192
1193    /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
Markus Armbruster2ff3de62013-07-04 15:09:22 +02001194 if (!qemu_opt_get_bool(qemu_get_machine_opts(),
1195 "dump-guest-core", true)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001196 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1197 if (ret) {
1198 perror("qemu_madvise");
1199 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1200 "but dump_guest_core=off specified\n");
1201 }
1202 }
1203}
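
/*
 * MADV_DONTDUMP (Linux 3.4+) is the mechanism behind the helper above:
 * it excludes an address range from core dumps.  A standalone use, with
 * a hypothetical helper name; the failure stays non-fatal, as above.
 */
#include <stdio.h>
#include <sys/mman.h>

static void exclude_from_coredump(void *addr, size_t size)
{
    if (madvise(addr, size, MADV_DONTDUMP)) {
        perror("madvise(MADV_DONTDUMP)");
    }
}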
1204
Avi Kivityc5705a72011-12-20 15:59:12 +02001205void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
Cam Macdonell84b89d72010-07-26 18:10:57 -06001206{
1207 RAMBlock *new_block, *block;
1208
Avi Kivityc5705a72011-12-20 15:59:12 +02001209 new_block = NULL;
Paolo Bonzinia3161032012-11-14 15:54:48 +01001210 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001211 if (block->offset == addr) {
1212 new_block = block;
1213 break;
1214 }
1215 }
1216 assert(new_block);
1217 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001218
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001219 if (dev) {
1220 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001221 if (id) {
1222 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001223 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001224 }
1225 }
1226 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1227
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001228 /* This assumes the iothread lock is taken here too. */
1229 qemu_mutex_lock_ramlist();
Paolo Bonzinia3161032012-11-14 15:54:48 +01001230 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001231 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001232 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1233 new_block->idstr);
1234 abort();
1235 }
1236 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001237 qemu_mutex_unlock_ramlist();
Avi Kivityc5705a72011-12-20 15:59:12 +02001238}
1239
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001240static int memory_try_enable_merging(void *addr, size_t len)
1241{
Markus Armbruster2ff3de62013-07-04 15:09:22 +02001242 if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001243 /* disabled by the user */
1244 return 0;
1245 }
1246
1247 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1248}
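
/*
 * MADV_MERGEABLE opts an anonymous mapping into Linux KSM (kernel
 * samepage merging), which is what the helper above toggles via the
 * "mem-merge" machine option.  A standalone sketch; alloc_mergeable()
 * is a hypothetical name, and the madvise result is best effort.
 */
#include <sys/mman.h>

static void *alloc_mergeable(size_t len)
{
    void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    if (p == MAP_FAILED) {
        return NULL;
    }
    (void)madvise(p, len, MADV_MERGEABLE);  /* ignore failure, as above */
    return p;
}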
1249
Avi Kivityc5705a72011-12-20 15:59:12 +02001250ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1251 MemoryRegion *mr)
1252{
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001253 RAMBlock *block, *new_block;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001254 ram_addr_t old_ram_size, new_ram_size;
1255
1256 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001257
1258 size = TARGET_PAGE_ALIGN(size);
1259 new_block = g_malloc0(sizeof(*new_block));
Markus Armbruster3435f392013-07-31 15:11:07 +02001260 new_block->fd = -1;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001261
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001262 /* This assumes the iothread lock is taken here too. */
1263 qemu_mutex_lock_ramlist();
Avi Kivity7c637362011-12-21 13:09:49 +02001264 new_block->mr = mr;
Jun Nakajima432d2682010-08-31 16:41:25 +01001265 new_block->offset = find_ram_offset(size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001266 if (host) {
1267 new_block->host = host;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001268 new_block->flags |= RAM_PREALLOC_MASK;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001269 } else if (xen_enabled()) {
1270 if (mem_path) {
1271 fprintf(stderr, "-mem-path not supported with Xen\n");
1272 exit(1);
1273 }
1274 xen_ram_alloc(new_block->offset, size, mr);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001275 } else {
1276 if (mem_path) {
Markus Armbrustere1e84ba2013-07-31 15:11:10 +02001277 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1278 /*
1279 * file_ram_alloc() needs to allocate just like
1280 * phys_mem_alloc, but we haven't bothered to provide
1281 * a hook there.
1282 */
1283 fprintf(stderr,
1284 "-mem-path not supported with this accelerator\n");
1285 exit(1);
1286 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001287 new_block->host = file_ram_alloc(new_block, size, mem_path);
Markus Armbruster0628c182013-07-31 15:11:06 +02001288 }
1289 if (!new_block->host) {
Markus Armbruster91138032013-07-31 15:11:08 +02001290 new_block->host = phys_mem_alloc(size);
Markus Armbruster39228252013-07-31 15:11:11 +02001291 if (!new_block->host) {
1292 fprintf(stderr, "Cannot set up guest memory '%s': %s\n",
1293 new_block->mr->name, strerror(errno));
1294 exit(1);
1295 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001296 memory_try_enable_merging(new_block->host, size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001297 }
1298 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001299 new_block->length = size;
1300
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001301 /* Keep the list sorted from biggest to smallest block. */
1302 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1303 if (block->length < new_block->length) {
1304 break;
1305 }
1306 }
1307 if (block) {
1308 QTAILQ_INSERT_BEFORE(block, new_block, next);
1309 } else {
1310 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1311 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001312 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001313
Umesh Deshpandef798b072011-08-18 11:41:17 -07001314 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001315 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001316
Juan Quintela2152f5c2013-10-08 13:52:02 +02001317 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1318
1319 if (new_ram_size > old_ram_size) {
Juan Quintela1ab4c8c2013-10-08 16:14:39 +02001320 int i;
1321 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1322 ram_list.dirty_memory[i] =
1323 bitmap_zero_extend(ram_list.dirty_memory[i],
1324 old_ram_size, new_ram_size);
1325 }
Juan Quintela2152f5c2013-10-08 13:52:02 +02001326 }
Juan Quintela75218e72013-10-08 12:31:54 +02001327 cpu_physical_memory_set_dirty_range(new_block->offset, size);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001328
Jason Baronddb97f12012-08-02 15:44:16 -04001329 qemu_ram_setup_dump(new_block->host, size);
Luiz Capitulinoad0b5322012-10-05 16:47:57 -03001330 qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
Andrea Arcangeli3e469db2013-07-25 12:11:15 +02001331 qemu_madvise(new_block->host, size, QEMU_MADV_DONTFORK);
Jason Baronddb97f12012-08-02 15:44:16 -04001332
Cam Macdonell84b89d72010-07-26 18:10:57 -06001333 if (kvm_enabled())
1334 kvm_setup_guest_memory(new_block->host, size);
1335
1336 return new_block->offset;
1337}
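
/*
 * The "keep the list sorted from biggest to smallest" insertion above,
 * redone with the standard <sys/queue.h> TAILQ macros so it reads on
 * its own (QTAILQ is QEMU's wrapper over the same idea).  Node and
 * insert_by_size() are hypothetical names.
 */
#include <sys/queue.h>
#include <stdint.h>

typedef struct Node {
    uint64_t length;
    TAILQ_ENTRY(Node) next;
} Node;

static TAILQ_HEAD(, Node) nodes = TAILQ_HEAD_INITIALIZER(nodes);

static void insert_by_size(Node *new_node)
{
    Node *n;

    TAILQ_FOREACH(n, &nodes, next) {
        if (n->length < new_node->length) {
            break;                  /* first smaller node: insert before it */
        }
    }
    if (n) {
        TAILQ_INSERT_BEFORE(n, new_node, next);
    } else {
        TAILQ_INSERT_TAIL(&nodes, new_node, next);
    }
}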
1338
Avi Kivityc5705a72011-12-20 15:59:12 +02001339ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
pbrook94a6b542009-04-11 17:15:54 +00001340{
Avi Kivityc5705a72011-12-20 15:59:12 +02001341 return qemu_ram_alloc_from_ptr(size, NULL, mr);
pbrook94a6b542009-04-11 17:15:54 +00001342}
bellarde9a1ab12007-02-08 23:08:38 +00001343
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001344void qemu_ram_free_from_ptr(ram_addr_t addr)
1345{
1346 RAMBlock *block;
1347
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001348 /* This assumes the iothread lock is taken here too. */
1349 qemu_mutex_lock_ramlist();
Paolo Bonzinia3161032012-11-14 15:54:48 +01001350 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001351 if (addr == block->offset) {
Paolo Bonzinia3161032012-11-14 15:54:48 +01001352 QTAILQ_REMOVE(&ram_list.blocks, block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001353 ram_list.mru_block = NULL;
Umesh Deshpandef798b072011-08-18 11:41:17 -07001354 ram_list.version++;
Anthony Liguori7267c092011-08-20 22:09:37 -05001355 g_free(block);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001356 break;
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001357 }
1358 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001359 qemu_mutex_unlock_ramlist();
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001360}
1361
Anthony Liguoric227f092009-10-01 16:12:16 -05001362void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00001363{
Alex Williamson04b16652010-07-02 11:13:17 -06001364 RAMBlock *block;
1365
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001366 /* This assumes the iothread lock is taken here too. */
1367 qemu_mutex_lock_ramlist();
Paolo Bonzinia3161032012-11-14 15:54:48 +01001368 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001369 if (addr == block->offset) {
Paolo Bonzinia3161032012-11-14 15:54:48 +01001370 QTAILQ_REMOVE(&ram_list.blocks, block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001371 ram_list.mru_block = NULL;
Umesh Deshpandef798b072011-08-18 11:41:17 -07001372 ram_list.version++;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001373 if (block->flags & RAM_PREALLOC_MASK) {
1374 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001375 } else if (xen_enabled()) {
1376 xen_invalidate_map_cache_entry(block->host);
Stefan Weil089f3f72013-09-18 07:48:15 +02001377#ifndef _WIN32
Markus Armbruster3435f392013-07-31 15:11:07 +02001378 } else if (block->fd >= 0) {
1379 munmap(block->host, block->length);
1380 close(block->fd);
Stefan Weil089f3f72013-09-18 07:48:15 +02001381#endif
Alex Williamson04b16652010-07-02 11:13:17 -06001382 } else {
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001383 qemu_anon_ram_free(block->host, block->length);
Alex Williamson04b16652010-07-02 11:13:17 -06001384 }
Anthony Liguori7267c092011-08-20 22:09:37 -05001385 g_free(block);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001386 break;
Alex Williamson04b16652010-07-02 11:13:17 -06001387 }
1388 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001389 qemu_mutex_unlock_ramlist();
Alex Williamson04b16652010-07-02 11:13:17 -06001390
bellarde9a1ab12007-02-08 23:08:38 +00001391}
1392
Huang Yingcd19cfa2011-03-02 08:56:19 +01001393#ifndef _WIN32
1394void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1395{
1396 RAMBlock *block;
1397 ram_addr_t offset;
1398 int flags;
1399 void *area, *vaddr;
1400
Paolo Bonzinia3161032012-11-14 15:54:48 +01001401 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001402 offset = addr - block->offset;
1403 if (offset < block->length) {
1404 vaddr = block->host + offset;
1405 if (block->flags & RAM_PREALLOC_MASK) {
1406 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001407 } else if (xen_enabled()) {
1408 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001409 } else {
1410 flags = MAP_FIXED;
1411 munmap(vaddr, length);
Markus Armbruster3435f392013-07-31 15:11:07 +02001412 if (block->fd >= 0) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001413#ifdef MAP_POPULATE
Markus Armbruster3435f392013-07-31 15:11:07 +02001414 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1415 MAP_PRIVATE;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001416#else
Markus Armbruster3435f392013-07-31 15:11:07 +02001417 flags |= MAP_PRIVATE;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001418#endif
Markus Armbruster3435f392013-07-31 15:11:07 +02001419 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1420 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001421 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001422 /*
1423 * Remap needs to match alloc. Accelerators that
1424 * set phys_mem_alloc never remap. If they did,
1425 * we'd need a remap hook here.
1426 */
1427 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1428
Huang Yingcd19cfa2011-03-02 08:56:19 +01001429 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1430 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1431 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001432 }
1433 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001434 fprintf(stderr, "Could not remap addr: "
1435 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001436 length, addr);
1437 exit(1);
1438 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001439 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001440 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001441 }
1442 return;
1443 }
1444 }
1445}
1446#endif /* !_WIN32 */
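
/*
 * The core of qemu_ram_remap() above, in isolation: mapping anonymous
 * memory with MAP_FIXED replaces whatever was previously at that
 * virtual address, which is how a damaged page is swapped for a fresh
 * one without moving the block.  remap_anon_fixed() is a hypothetical
 * name; the fd-backed branch and the prior munmap are elided.
 */
#include <sys/mman.h>

static int remap_anon_fixed(void *vaddr, size_t length)
{
    void *area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                      MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    return area == vaddr ? 0 : -1;          /* must land exactly at vaddr */
}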
1447
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001448/* Return a host pointer to ram allocated with qemu_ram_alloc.
1449 With the exception of the softmmu code in this file, this should
1450 only be used for local memory (e.g. video ram) that the device owns,
1451 and knows it isn't going to access beyond the end of the block.
1452
1453 It should not be used for general purpose DMA.
1454 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1455 */
1456void *qemu_get_ram_ptr(ram_addr_t addr)
1457{
1458 RAMBlock *block = qemu_get_ram_block(addr);
1459
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001460 if (xen_enabled()) {
1461        /* We need to check whether the requested address is in RAM,
1462         * because we don't want to map the entire memory in QEMU.
1463         * In that case, just map up to the end of the page.
1464 */
1465 if (block->offset == 0) {
1466 return xen_map_cache(addr, 0, 0);
1467 } else if (block->host == NULL) {
1468 block->host =
1469 xen_map_cache(block->offset, block->length, 1);
1470 }
1471 }
1472 return block->host + (addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00001473}
1474
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001475/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1476 * but takes a size argument */
Peter Maydellcb85f7a2013-07-08 09:44:04 +01001477static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001478{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001479 if (*size == 0) {
1480 return NULL;
1481 }
Jan Kiszka868bb332011-06-21 22:59:09 +02001482 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001483 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02001484 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001485 RAMBlock *block;
1486
Paolo Bonzinia3161032012-11-14 15:54:48 +01001487 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001488 if (addr - block->offset < block->length) {
1489 if (addr - block->offset + *size > block->length)
1490 *size = block->length - addr + block->offset;
1491 return block->host + (addr - block->offset);
1492 }
1493 }
1494
1495 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1496 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001497 }
1498}
1499
Paolo Bonzini7443b432013-06-03 12:44:02 +02001500/* Some of the softmmu routines need to translate from a host pointer
1501 (typically a TLB entry) back to a ram offset. */
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001502MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00001503{
pbrook94a6b542009-04-11 17:15:54 +00001504 RAMBlock *block;
1505 uint8_t *host = ptr;
1506
Jan Kiszka868bb332011-06-21 22:59:09 +02001507 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001508 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001509 return qemu_get_ram_block(*ram_addr)->mr;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001510 }
1511
Paolo Bonzini23887b72013-05-06 14:28:39 +02001512 block = ram_list.mru_block;
1513 if (block && block->host && host - block->host < block->length) {
1514 goto found;
1515 }
1516
Paolo Bonzinia3161032012-11-14 15:54:48 +01001517 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001518        /* This case occurs when the block is not mapped. */
1519 if (block->host == NULL) {
1520 continue;
1521 }
Alex Williamsonf471a172010-06-11 11:11:42 -06001522 if (host - block->host < block->length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001523 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001524 }
pbrook94a6b542009-04-11 17:15:54 +00001525 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001526
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001527 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001528
1529found:
1530 *ram_addr = block->offset + (host - block->host);
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001531 return block->mr;
Marcelo Tosattie8902612010-10-11 15:31:19 -03001532}
Alex Williamsonf471a172010-06-11 11:11:42 -06001533
Avi Kivitya8170e52012-10-23 12:30:10 +02001534static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001535 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001536{
Juan Quintela52159192013-10-08 12:44:04 +02001537 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001538 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00001539 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001540 switch (size) {
1541 case 1:
1542 stb_p(qemu_get_ram_ptr(ram_addr), val);
1543 break;
1544 case 2:
1545 stw_p(qemu_get_ram_ptr(ram_addr), val);
1546 break;
1547 case 4:
1548 stl_p(qemu_get_ram_ptr(ram_addr), val);
1549 break;
1550 default:
1551 abort();
1552 }
Juan Quintela52159192013-10-08 12:44:04 +02001553 cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_MIGRATION);
1554 cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_VGA);
bellardf23db162005-08-21 19:12:28 +00001555 /* we remove the notdirty callback only if the code has been
1556 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02001557 if (!cpu_physical_memory_is_clean(ram_addr)) {
Andreas Färber4917cf42013-05-27 05:17:50 +02001558 CPUArchState *env = current_cpu->env_ptr;
Andreas Färber93afead2013-08-26 03:41:01 +02001559 tlb_set_dirty(env, current_cpu->mem_io_vaddr);
Andreas Färber4917cf42013-05-27 05:17:50 +02001560 }
bellard1ccde1c2004-02-06 19:46:14 +00001561}
1562
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001563static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1564 unsigned size, bool is_write)
1565{
1566 return is_write;
1567}
1568
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001569static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001570 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001571 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001572 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00001573};
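
/*
 * The dirty-flag calls used by notdirty_mem_write() come down to
 * per-page bitmap operations.  A flat-bitmap sketch with an assumed
 * 4 KiB page (TARGET_PAGE_BITS is target-dependent) and hypothetical
 * names:
 */
#include <stdbool.h>
#include <stdint.h>

#define SKETCH_PAGE_BITS 12
#define BITS_PER_LONG    (8 * sizeof(unsigned long))

static unsigned long dirty_bitmap[1024];    /* enough pages for a sketch */

static void set_dirty(uint64_t ram_addr)
{
    uint64_t page = ram_addr >> SKETCH_PAGE_BITS;

    dirty_bitmap[page / BITS_PER_LONG] |= 1UL << (page % BITS_PER_LONG);
}

static bool is_dirty(uint64_t ram_addr)
{
    uint64_t page = ram_addr >> SKETCH_PAGE_BITS;

    return dirty_bitmap[page / BITS_PER_LONG] & (1UL << (page % BITS_PER_LONG));
}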
1574
pbrook0f459d12008-06-09 00:20:13 +00001575/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00001576static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00001577{
Andreas Färber93afead2013-08-26 03:41:01 +02001578 CPUState *cpu = current_cpu;
1579 CPUArchState *env = cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00001580 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00001581 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00001582 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00001583 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00001584
Andreas Färberff4700b2013-08-26 18:23:18 +02001585 if (cpu->watchpoint_hit) {
aliguori06d55cc2008-11-18 20:24:06 +00001586 /* We re-entered the check after replacing the TB. Now raise
1587         * the debug interrupt so that it will trigger after the
1588 * current instruction. */
Andreas Färber93afead2013-08-26 03:41:01 +02001589 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00001590 return;
1591 }
Andreas Färber93afead2013-08-26 03:41:01 +02001592 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Andreas Färberff4700b2013-08-26 18:23:18 +02001593 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001594 if ((vaddr == (wp->vaddr & len_mask) ||
1595 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00001596 wp->flags |= BP_WATCHPOINT_HIT;
Andreas Färberff4700b2013-08-26 18:23:18 +02001597 if (!cpu->watchpoint_hit) {
1598 cpu->watchpoint_hit = wp;
Andreas Färber239c51a2013-09-01 17:12:23 +02001599 tb_check_watchpoint(cpu);
aliguori6e140f22008-11-18 20:37:55 +00001600 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
Andreas Färber27103422013-08-26 08:31:06 +02001601 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +02001602 cpu_loop_exit(cpu);
aliguori6e140f22008-11-18 20:37:55 +00001603 } else {
1604 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
Andreas Färber648f0342013-09-01 17:43:17 +02001605 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
Andreas Färber0ea8cb82013-09-03 02:12:23 +02001606 cpu_resume_from_signal(cpu, NULL);
aliguori6e140f22008-11-18 20:37:55 +00001607 }
aliguori06d55cc2008-11-18 20:24:06 +00001608 }
aliguori6e140f22008-11-18 20:37:55 +00001609 } else {
1610 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00001611 }
1612 }
1613}
1614
pbrook6658ffb2007-03-16 23:58:11 +00001615/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1616 so these check for a hit then pass through to the normal out-of-line
1617 phys routines. */
Avi Kivitya8170e52012-10-23 12:30:10 +02001618static uint64_t watch_mem_read(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001619 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001620{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001621 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1622 switch (size) {
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10001623 case 1: return ldub_phys(&address_space_memory, addr);
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10001624 case 2: return lduw_phys(&address_space_memory, addr);
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01001625 case 4: return ldl_phys(&address_space_memory, addr);
Avi Kivity1ec9b902012-01-02 12:47:48 +02001626 default: abort();
1627 }
pbrook6658ffb2007-03-16 23:58:11 +00001628}
1629
Avi Kivitya8170e52012-10-23 12:30:10 +02001630static void watch_mem_write(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001631 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001632{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001633 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1634 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04001635 case 1:
Edgar E. Iglesiasdb3be602013-12-17 15:29:06 +10001636 stb_phys(&address_space_memory, addr, val);
Max Filippov67364152012-01-29 00:01:40 +04001637 break;
1638 case 2:
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10001639 stw_phys(&address_space_memory, addr, val);
Max Filippov67364152012-01-29 00:01:40 +04001640 break;
1641 case 4:
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10001642 stl_phys(&address_space_memory, addr, val);
Max Filippov67364152012-01-29 00:01:40 +04001643 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02001644 default: abort();
1645 }
pbrook6658ffb2007-03-16 23:58:11 +00001646}
1647
Avi Kivity1ec9b902012-01-02 12:47:48 +02001648static const MemoryRegionOps watch_mem_ops = {
1649 .read = watch_mem_read,
1650 .write = watch_mem_write,
1651 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00001652};
pbrook6658ffb2007-03-16 23:58:11 +00001653
Avi Kivitya8170e52012-10-23 12:30:10 +02001654static uint64_t subpage_read(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001655 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001656{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001657 subpage_t *subpage = opaque;
1658 uint8_t buf[4];
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001659
blueswir1db7b5422007-05-26 17:36:03 +00001660#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001661 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001662 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00001663#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001664 address_space_read(subpage->as, addr + subpage->base, buf, len);
1665 switch (len) {
1666 case 1:
1667 return ldub_p(buf);
1668 case 2:
1669 return lduw_p(buf);
1670 case 4:
1671 return ldl_p(buf);
1672 default:
1673 abort();
1674 }
blueswir1db7b5422007-05-26 17:36:03 +00001675}
1676
Avi Kivitya8170e52012-10-23 12:30:10 +02001677static void subpage_write(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001678 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001679{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001680 subpage_t *subpage = opaque;
1681 uint8_t buf[4];
1682
blueswir1db7b5422007-05-26 17:36:03 +00001683#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001684 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001685 " value %"PRIx64"\n",
1686 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00001687#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001688 switch (len) {
1689 case 1:
1690 stb_p(buf, value);
1691 break;
1692 case 2:
1693 stw_p(buf, value);
1694 break;
1695 case 4:
1696 stl_p(buf, value);
1697 break;
1698 default:
1699 abort();
1700 }
1701 address_space_write(subpage->as, addr + subpage->base, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00001702}
1703
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001704static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08001705 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001706{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001707 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001708#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001709 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001710 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001711#endif
1712
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001713 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08001714 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001715}
1716
Avi Kivity70c68e42012-01-02 12:32:48 +02001717static const MemoryRegionOps subpage_ops = {
1718 .read = subpage_read,
1719 .write = subpage_write,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001720 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02001721 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00001722};
1723
Anthony Liguoric227f092009-10-01 16:12:16 -05001724static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02001725 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00001726{
1727 int idx, eidx;
1728
1729 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1730 return -1;
1731 idx = SUBPAGE_IDX(start);
1732 eidx = SUBPAGE_IDX(end);
1733#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001734 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
1735 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00001736#endif
blueswir1db7b5422007-05-26 17:36:03 +00001737 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02001738 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00001739 }
1740
1741 return 0;
1742}
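
/*
 * Subpage dispatch reduces to a lookup table: one 16-bit section id per
 * granule of the page, filled over [start, end] exactly as above.  A
 * sketch with hypothetical names, byte granularity, and an assumed
 * 4 KiB page:
 */
#include <stdint.h>

#define SKETCH_PAGE_SIZE 4096

static uint16_t sub_sections[SKETCH_PAGE_SIZE];

static int register_subrange(uint32_t start, uint32_t end, uint16_t section)
{
    uint32_t idx;

    if (start >= SKETCH_PAGE_SIZE || end >= SKETCH_PAGE_SIZE) {
        return -1;
    }
    for (idx = start; idx <= end; idx++) {
        sub_sections[idx] = section;
    }
    return 0;
}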
1743
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001744static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00001745{
Anthony Liguoric227f092009-10-01 16:12:16 -05001746 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00001747
Anthony Liguori7267c092011-08-20 22:09:37 -05001748 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00001749
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001750 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00001751 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001752 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Avi Kivity70c68e42012-01-02 12:32:48 +02001753 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02001754 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00001755#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001756 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1757 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00001758#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02001759 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00001760
1761 return mmio;
1762}
1763
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001764static uint16_t dummy_section(PhysPageMap *map, MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02001765{
1766 MemoryRegionSection section = {
Edgar E. Iglesias3be91e82013-11-07 18:42:51 +01001767 .address_space = &address_space_memory,
Avi Kivity5312bd82012-02-12 18:32:55 +02001768 .mr = mr,
1769 .offset_within_address_space = 0,
1770 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001771 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02001772 };
1773
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001774 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02001775}
1776
Edgar E. Iglesias77717092013-11-07 19:55:56 +01001777MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02001778{
Edgar E. Iglesias77717092013-11-07 19:55:56 +01001779 return as->dispatch->map.sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02001780}
1781
Avi Kivitye9179ce2009-06-14 11:38:52 +03001782static void io_mem_init(void)
1783{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001784 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
1785 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001786 "unassigned", UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001787 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001788 "notdirty", UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001789 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001790 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03001791}
1792
Avi Kivityac1970f2012-10-03 16:22:53 +02001793static void mem_begin(MemoryListener *listener)
1794{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001795 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001796 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
1797 uint16_t n;
1798
1799 n = dummy_section(&d->map, &io_mem_unassigned);
1800 assert(n == PHYS_SECTION_UNASSIGNED);
1801 n = dummy_section(&d->map, &io_mem_notdirty);
1802 assert(n == PHYS_SECTION_NOTDIRTY);
1803 n = dummy_section(&d->map, &io_mem_rom);
1804 assert(n == PHYS_SECTION_ROM);
1805 n = dummy_section(&d->map, &io_mem_watch);
1806 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02001807
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02001808 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02001809 d->as = as;
1810 as->next_dispatch = d;
1811}
1812
1813static void mem_commit(MemoryListener *listener)
1814{
1815 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02001816 AddressSpaceDispatch *cur = as->dispatch;
1817 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02001818
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001819 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02001820
Paolo Bonzini0475d942013-05-29 12:28:21 +02001821 as->dispatch = next;
Avi Kivityac1970f2012-10-03 16:22:53 +02001822
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001823 if (cur) {
1824 phys_sections_free(&cur->map);
1825 g_free(cur);
1826 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001827}
1828
Avi Kivity1d711482012-10-02 18:54:45 +02001829static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02001830{
Andreas Färber182735e2013-05-29 22:29:20 +02001831 CPUState *cpu;
Avi Kivity117712c2012-02-12 21:23:17 +02001832
1833 /* since each CPU stores ram addresses in its TLB cache, we must
1834 reset the modified entries */
1835 /* XXX: slow ! */
Andreas Färberbdc44642013-06-24 23:50:24 +02001836 CPU_FOREACH(cpu) {
Andreas Färber182735e2013-05-29 22:29:20 +02001837 CPUArchState *env = cpu->env_ptr;
1838
Edgar E. Iglesias33bde2e2013-11-21 19:06:30 +01001839        /* FIXME: Disentangle the cpu.h circular file dependencies so we can
1840 directly get the right CPU from listener. */
1841 if (cpu->tcg_as_listener != listener) {
1842 continue;
1843 }
Avi Kivity117712c2012-02-12 21:23:17 +02001844 tlb_flush(env, 1);
1845 }
Avi Kivity50c1e142012-02-08 21:36:02 +02001846}
1847
Avi Kivity93632742012-02-08 16:54:16 +02001848static void core_log_global_start(MemoryListener *listener)
1849{
Juan Quintela981fdf22013-10-10 11:54:09 +02001850 cpu_physical_memory_set_dirty_tracking(true);
Avi Kivity93632742012-02-08 16:54:16 +02001851}
1852
1853static void core_log_global_stop(MemoryListener *listener)
1854{
Juan Quintela981fdf22013-10-10 11:54:09 +02001855 cpu_physical_memory_set_dirty_tracking(false);
Avi Kivity93632742012-02-08 16:54:16 +02001856}
1857
Avi Kivity93632742012-02-08 16:54:16 +02001858static MemoryListener core_memory_listener = {
Avi Kivity93632742012-02-08 16:54:16 +02001859 .log_global_start = core_log_global_start,
1860 .log_global_stop = core_log_global_stop,
Avi Kivityac1970f2012-10-03 16:22:53 +02001861 .priority = 1,
Avi Kivity93632742012-02-08 16:54:16 +02001862};
1863
Avi Kivityac1970f2012-10-03 16:22:53 +02001864void address_space_init_dispatch(AddressSpace *as)
1865{
Paolo Bonzini00752702013-05-29 12:13:54 +02001866 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001867 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02001868 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02001869 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02001870 .region_add = mem_add,
1871 .region_nop = mem_add,
1872 .priority = 0,
1873 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001874 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02001875}
1876
Avi Kivity83f3c252012-10-07 12:59:55 +02001877void address_space_destroy_dispatch(AddressSpace *as)
1878{
1879 AddressSpaceDispatch *d = as->dispatch;
1880
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001881 memory_listener_unregister(&as->dispatch_listener);
Avi Kivity83f3c252012-10-07 12:59:55 +02001882 g_free(d);
1883 as->dispatch = NULL;
1884}
1885
Avi Kivity62152b82011-07-26 14:26:14 +03001886static void memory_map_init(void)
1887{
Anthony Liguori7267c092011-08-20 22:09:37 -05001888 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01001889
Paolo Bonzini57271d62013-11-07 17:14:37 +01001890 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00001891 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03001892
Anthony Liguori7267c092011-08-20 22:09:37 -05001893 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02001894 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
1895 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00001896 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity93632742012-02-08 16:54:16 +02001897
Avi Kivityf6790af2012-10-02 20:13:51 +02001898 memory_listener_register(&core_memory_listener, &address_space_memory);
Avi Kivity62152b82011-07-26 14:26:14 +03001899}
1900
1901MemoryRegion *get_system_memory(void)
1902{
1903 return system_memory;
1904}
1905
Avi Kivity309cb472011-08-08 16:09:03 +03001906MemoryRegion *get_system_io(void)
1907{
1908 return system_io;
1909}
1910
pbrooke2eef172008-06-08 01:09:01 +00001911#endif /* !defined(CONFIG_USER_ONLY) */
1912
bellard13eb76e2004-01-24 15:23:36 +00001913/* physical memory access (slow version, mainly for debug) */
1914#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02001915int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00001916 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00001917{
1918 int l, flags;
1919 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00001920 void * p;
bellard13eb76e2004-01-24 15:23:36 +00001921
1922 while (len > 0) {
1923 page = addr & TARGET_PAGE_MASK;
1924 l = (page + TARGET_PAGE_SIZE) - addr;
1925 if (l > len)
1926 l = len;
1927 flags = page_get_flags(page);
1928 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00001929 return -1;
bellard13eb76e2004-01-24 15:23:36 +00001930 if (is_write) {
1931 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00001932 return -1;
bellard579a97f2007-11-11 14:26:47 +00001933 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001934 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00001935 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001936 memcpy(p, buf, l);
1937 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00001938 } else {
1939 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00001940 return -1;
bellard579a97f2007-11-11 14:26:47 +00001941 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001942 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00001943 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001944 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00001945 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00001946 }
1947 len -= l;
1948 buf += l;
1949 addr += l;
1950 }
Paul Brooka68fe892010-03-01 00:08:59 +00001951 return 0;
bellard13eb76e2004-01-24 15:23:36 +00001952}
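
/*
 * The page-chunking pattern used above, isolated: each iteration clips
 * the transfer length so it never crosses a page boundary.  The helper
 * name, the callback, and PAGE_SIZE_ASSUMED are all assumptions.
 */
#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE_ASSUMED 4096

static void for_each_page_chunk(uint64_t addr, size_t len,
                                void (*fn)(uint64_t addr, size_t l))
{
    while (len > 0) {
        uint64_t page = addr & ~(uint64_t)(PAGE_SIZE_ASSUMED - 1);
        size_t l = (page + PAGE_SIZE_ASSUMED) - addr;   /* bytes to page end */

        if (l > len) {
            l = len;
        }
        fn(addr, l);
        len -= l;
        addr += l;
    }
}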
bellard8df1cd02005-01-28 22:37:22 +00001953
bellard13eb76e2004-01-24 15:23:36 +00001954#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001955
Avi Kivitya8170e52012-10-23 12:30:10 +02001956static void invalidate_and_set_dirty(hwaddr addr,
1957 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001958{
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02001959 if (cpu_physical_memory_is_clean(addr)) {
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001960 /* invalidate code */
1961 tb_invalidate_phys_page_range(addr, addr + length, 0);
1962 /* set dirty bit */
Juan Quintela52159192013-10-08 12:44:04 +02001963 cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_VGA);
1964 cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001965 }
Anthony PERARDe2269392012-10-03 13:49:22 +00001966 xen_modified_memory(addr, length);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001967}
1968
Richard Henderson23326162013-07-08 14:55:59 -07001969static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02001970{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02001971 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07001972
1973 /* Regions are assumed to support 1-4 byte accesses unless
1974 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07001975 if (access_size_max == 0) {
1976 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02001977 }
Richard Henderson23326162013-07-08 14:55:59 -07001978
1979 /* Bound the maximum access by the alignment of the address. */
1980 if (!mr->ops->impl.unaligned) {
1981 unsigned align_size_max = addr & -addr;
1982 if (align_size_max != 0 && align_size_max < access_size_max) {
1983 access_size_max = align_size_max;
1984 }
1985 }
1986
1987 /* Don't attempt accesses larger than the maximum. */
1988 if (l > access_size_max) {
1989 l = access_size_max;
1990 }
Paolo Bonzini098178f2013-07-29 14:27:39 +02001991 if (l & (l - 1)) {
1992 l = 1 << (qemu_fls(l) - 1);
1993 }
Richard Henderson23326162013-07-08 14:55:59 -07001994
1995 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02001996}
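
/*
 * A sketch of the two arithmetic tricks above: 'addr & -addr' isolates
 * the lowest set bit (the largest natural alignment of addr), and
 * 'l & (l - 1)' detects a non-power-of-two, which is then rounded down
 * to one.  fls() is a portable stand-in for qemu_fls(), assuming a
 * GCC/Clang-style __builtin_clz.
 */
#include <stdint.h>

static int fls(unsigned v)          /* position of highest set bit, 1-based */
{
    return v ? 32 - __builtin_clz(v) : 0;
}

static unsigned bound_access(unsigned l, uint64_t addr, unsigned max)
{
    uint64_t align = addr & -addr;  /* e.g. addr = 0x1068 -> align = 8 */

    if (align != 0 && align < max) {
        max = align;
    }
    if (l > max) {
        l = max;
    }
    if (l & (l - 1)) {              /* not a power of two */
        l = 1u << (fls(l) - 1);     /* round down to one */
    }
    return l;
}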
1997
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001998bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02001999 int len, bool is_write)
bellard13eb76e2004-01-24 15:23:36 +00002000{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002001 hwaddr l;
bellard13eb76e2004-01-24 15:23:36 +00002002 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002003 uint64_t val;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002004 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002005 MemoryRegion *mr;
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002006 bool error = false;
ths3b46e622007-09-17 08:09:54 +00002007
bellard13eb76e2004-01-24 15:23:36 +00002008 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002009 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002010 mr = address_space_translate(as, addr, &addr1, &l, is_write);
ths3b46e622007-09-17 08:09:54 +00002011
bellard13eb76e2004-01-24 15:23:36 +00002012 if (is_write) {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002013 if (!memory_access_is_direct(mr, is_write)) {
2014 l = memory_access_size(mr, l, addr1);
Andreas Färber4917cf42013-05-27 05:17:50 +02002015 /* XXX: could force current_cpu to NULL to avoid
bellard6a00d602005-11-21 23:25:50 +00002016 potential bugs */
Richard Henderson23326162013-07-08 14:55:59 -07002017 switch (l) {
2018 case 8:
2019 /* 64 bit write access */
2020 val = ldq_p(buf);
2021 error |= io_mem_write(mr, addr1, val, 8);
2022 break;
2023 case 4:
bellard1c213d12005-09-03 10:49:04 +00002024 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002025 val = ldl_p(buf);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002026 error |= io_mem_write(mr, addr1, val, 4);
Richard Henderson23326162013-07-08 14:55:59 -07002027 break;
2028 case 2:
bellard1c213d12005-09-03 10:49:04 +00002029 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002030 val = lduw_p(buf);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002031 error |= io_mem_write(mr, addr1, val, 2);
Richard Henderson23326162013-07-08 14:55:59 -07002032 break;
2033 case 1:
bellard1c213d12005-09-03 10:49:04 +00002034 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002035 val = ldub_p(buf);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002036 error |= io_mem_write(mr, addr1, val, 1);
Richard Henderson23326162013-07-08 14:55:59 -07002037 break;
2038 default:
2039 abort();
bellard13eb76e2004-01-24 15:23:36 +00002040 }
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02002041 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002042 addr1 += memory_region_get_ram_addr(mr);
bellard13eb76e2004-01-24 15:23:36 +00002043 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002044 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00002045 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002046 invalidate_and_set_dirty(addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002047 }
2048 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002049 if (!memory_access_is_direct(mr, is_write)) {
bellard13eb76e2004-01-24 15:23:36 +00002050 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002051 l = memory_access_size(mr, l, addr1);
Richard Henderson23326162013-07-08 14:55:59 -07002052 switch (l) {
2053 case 8:
2054 /* 64 bit read access */
2055 error |= io_mem_read(mr, addr1, &val, 8);
2056 stq_p(buf, val);
2057 break;
2058 case 4:
bellard13eb76e2004-01-24 15:23:36 +00002059 /* 32 bit read access */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002060 error |= io_mem_read(mr, addr1, &val, 4);
bellardc27004e2005-01-03 23:35:10 +00002061 stl_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002062 break;
2063 case 2:
bellard13eb76e2004-01-24 15:23:36 +00002064 /* 16 bit read access */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002065 error |= io_mem_read(mr, addr1, &val, 2);
bellardc27004e2005-01-03 23:35:10 +00002066 stw_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002067 break;
2068 case 1:
bellard1c213d12005-09-03 10:49:04 +00002069 /* 8 bit read access */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002070 error |= io_mem_read(mr, addr1, &val, 1);
bellardc27004e2005-01-03 23:35:10 +00002071 stb_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002072 break;
2073 default:
2074 abort();
bellard13eb76e2004-01-24 15:23:36 +00002075 }
2076 } else {
2077 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002078 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
Avi Kivityf3705d52012-03-08 16:16:34 +02002079 memcpy(buf, ptr, l);
bellard13eb76e2004-01-24 15:23:36 +00002080 }
2081 }
2082 len -= l;
2083 buf += l;
2084 addr += l;
2085 }
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002086
2087 return error;
bellard13eb76e2004-01-24 15:23:36 +00002088}
bellard8df1cd02005-01-28 22:37:22 +00002089
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002090bool address_space_write(AddressSpace *as, hwaddr addr,
Avi Kivityac1970f2012-10-03 16:22:53 +02002091 const uint8_t *buf, int len)
2092{
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002093 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
Avi Kivityac1970f2012-10-03 16:22:53 +02002094}
2095
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002096bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002097{
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002098 return address_space_rw(as, addr, buf, len, false);
Avi Kivityac1970f2012-10-03 16:22:53 +02002099}
2100
2101
Avi Kivitya8170e52012-10-23 12:30:10 +02002102void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002103 int len, int is_write)
2104{
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002105 address_space_rw(&address_space_memory, addr, buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002106}
2107
Alexander Graf582b55a2013-12-11 14:17:44 +01002108enum write_rom_type {
2109 WRITE_DATA,
2110 FLUSH_CACHE,
2111};
2112
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002113static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
Alexander Graf582b55a2013-12-11 14:17:44 +01002114 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
bellardd0ecd2a2006-04-23 17:14:48 +00002115{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002116 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002117 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002118 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002119 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002120
bellardd0ecd2a2006-04-23 17:14:48 +00002121 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002122 l = len;
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002123 mr = address_space_translate(as, addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002124
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002125 if (!(memory_region_is_ram(mr) ||
2126 memory_region_is_romd(mr))) {
bellardd0ecd2a2006-04-23 17:14:48 +00002127 /* do nothing */
2128 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002129 addr1 += memory_region_get_ram_addr(mr);
bellardd0ecd2a2006-04-23 17:14:48 +00002130 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002131 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf582b55a2013-12-11 14:17:44 +01002132 switch (type) {
2133 case WRITE_DATA:
2134 memcpy(ptr, buf, l);
2135 invalidate_and_set_dirty(addr1, l);
2136 break;
2137 case FLUSH_CACHE:
2138 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2139 break;
2140 }
bellardd0ecd2a2006-04-23 17:14:48 +00002141 }
2142 len -= l;
2143 buf += l;
2144 addr += l;
2145 }
2146}
2147
Alexander Graf582b55a2013-12-11 14:17:44 +01002148/* used for ROM loading : can write in RAM and ROM */
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002149void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
Alexander Graf582b55a2013-12-11 14:17:44 +01002150 const uint8_t *buf, int len)
2151{
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002152 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
Alexander Graf582b55a2013-12-11 14:17:44 +01002153}
2154
2155void cpu_flush_icache_range(hwaddr start, int len)
2156{
2157 /*
2158 * This function should do the same thing as an icache flush that was
2159 * triggered from within the guest. For TCG we are always cache coherent,
2160 * so there is no need to flush anything. For KVM / Xen we need to flush
2161 * the host's instruction cache at least.
2162 */
2163 if (tcg_enabled()) {
2164 return;
2165 }
2166
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002167 cpu_physical_memory_write_rom_internal(&address_space_memory,
2168 start, NULL, len, FLUSH_CACHE);
Alexander Graf582b55a2013-12-11 14:17:44 +01002169}
2170
aliguori6d16c2f2009-01-22 16:59:11 +00002171typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002172 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002173 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002174 hwaddr addr;
2175 hwaddr len;
aliguori6d16c2f2009-01-22 16:59:11 +00002176} BounceBuffer;
2177
2178static BounceBuffer bounce;
2179
aliguoriba223c22009-01-22 16:59:16 +00002180typedef struct MapClient {
2181 void *opaque;
2182 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00002183 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002184} MapClient;
2185
Blue Swirl72cf2d42009-09-12 07:36:22 +00002186static QLIST_HEAD(map_client_list, MapClient) map_client_list
2187 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002188
2189void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2190{
Anthony Liguori7267c092011-08-20 22:09:37 -05002191 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00002192
2193 client->opaque = opaque;
2194 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002195 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00002196 return client;
2197}
2198
Blue Swirl8b9c99d2012-10-28 11:04:51 +00002199static void cpu_unregister_map_client(void *_client)
aliguoriba223c22009-01-22 16:59:16 +00002200{
2201 MapClient *client = (MapClient *)_client;
2202
Blue Swirl72cf2d42009-09-12 07:36:22 +00002203 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002204 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002205}
2206
2207static void cpu_notify_map_clients(void)
2208{
2209 MapClient *client;
2210
Blue Swirl72cf2d42009-09-12 07:36:22 +00002211 while (!QLIST_EMPTY(&map_client_list)) {
2212 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002213 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09002214 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00002215 }
2216}
2217
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002218bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2219{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002220 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002221 hwaddr l, xlat;
2222
2223 while (len > 0) {
2224 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002225 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2226 if (!memory_access_is_direct(mr, is_write)) {
2227 l = memory_access_size(mr, l, addr);
2228 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002229 return false;
2230 }
2231 }
2232
2233 len -= l;
2234 addr += l;
2235 }
2236 return true;
2237}
2238
aliguori6d16c2f2009-01-22 16:59:11 +00002239/* Map a physical memory region into a host virtual address.
2240 * May map a subset of the requested range, given by and returned in *plen.
2241 * May return NULL if resources needed to perform the mapping are exhausted.
2242 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002243 * Use cpu_register_map_client() to know when retrying the map operation is
2244 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002245 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002246void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002247 hwaddr addr,
2248 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002249 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002250{
Avi Kivitya8170e52012-10-23 12:30:10 +02002251 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002252 hwaddr done = 0;
2253 hwaddr l, xlat, base;
2254 MemoryRegion *mr, *this_mr;
2255 ram_addr_t raddr;
aliguori6d16c2f2009-01-22 16:59:11 +00002256
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002257 if (len == 0) {
2258 return NULL;
2259 }
aliguori6d16c2f2009-01-22 16:59:11 +00002260
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002261 l = len;
2262 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2263 if (!memory_access_is_direct(mr, is_write)) {
2264 if (bounce.buffer) {
2265 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002266 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002267 /* Avoid unbounded allocations */
2268 l = MIN(l, TARGET_PAGE_SIZE);
2269 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002270 bounce.addr = addr;
2271 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002272
2273 memory_region_ref(mr);
2274 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002275 if (!is_write) {
2276 address_space_read(as, addr, bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002277 }
aliguori6d16c2f2009-01-22 16:59:11 +00002278
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002279 *plen = l;
2280 return bounce.buffer;
2281 }
2282
2283 base = xlat;
2284 raddr = memory_region_get_ram_addr(mr);
2285
2286 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00002287 len -= l;
2288 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002289 done += l;
2290 if (len == 0) {
2291 break;
2292 }
2293
2294 l = len;
2295 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2296 if (this_mr != mr || xlat != base + done) {
2297 break;
2298 }
aliguori6d16c2f2009-01-22 16:59:11 +00002299 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002300
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002301 memory_region_ref(mr);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002302 *plen = done;
2303 return qemu_ram_ptr_length(raddr + base, plen);
aliguori6d16c2f2009-01-22 16:59:11 +00002304}
2305
Avi Kivityac1970f2012-10-03 16:22:53 +02002306/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002307 * Will also mark the memory as dirty if is_write == 1. access_len gives
2308 * the amount of memory that was actually read or written by the caller.
2309 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002310void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2311 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002312{
2313 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002314 MemoryRegion *mr;
2315 ram_addr_t addr1;
2316
2317 mr = qemu_ram_addr_from_host(buffer, &addr1);
2318 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00002319 if (is_write) {
aliguori6d16c2f2009-01-22 16:59:11 +00002320 while (access_len) {
2321 unsigned l;
2322 l = TARGET_PAGE_SIZE;
2323 if (l > access_len)
2324 l = access_len;
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002325 invalidate_and_set_dirty(addr1, l);
aliguori6d16c2f2009-01-22 16:59:11 +00002326 addr1 += l;
2327 access_len -= l;
2328 }
2329 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002330 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002331 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002332 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002333 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00002334 return;
2335 }
2336 if (is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002337 address_space_write(as, bounce.addr, bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002338 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002339 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002340 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002341 memory_region_unref(bounce.mr);
aliguoriba223c22009-01-22 16:59:16 +00002342 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002343}
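
/* Example (editor's sketch): the canonical map/copy/unmap pattern for
 * the two functions above.  The helper name is invented; it copies up
 * to len bytes out of guest memory, tolerating address_space_map()
 * shortening the request or returning NULL when the bounce buffer is
 * exhausted. */
static hwaddr example_copy_from_guest(AddressSpace *as, hwaddr addr,
                                      void *dst, hwaddr len)
{
    hwaddr plen = len;
    void *p = address_space_map(as, addr, &plen, false);

    if (!p) {
        return 0;   /* caller may retry via cpu_register_map_client() */
    }
    memcpy(dst, p, plen);
    /* Read-only mapping, so access_len simply reports what we consumed. */
    address_space_unmap(as, p, plen, false, plen);
    return plen;    /* may be shorter than the original request */
}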
bellardd0ecd2a2006-04-23 17:14:48 +00002344
Avi Kivitya8170e52012-10-23 12:30:10 +02002345void *cpu_physical_memory_map(hwaddr addr,
2346 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002347 int is_write)
2348{
2349 return address_space_map(&address_space_memory, addr, plen, is_write);
2350}
2351
Avi Kivitya8170e52012-10-23 12:30:10 +02002352void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2353 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002354{
2355 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2356}
2357
bellard8df1cd02005-01-28 22:37:22 +00002358/* warning: addr must be aligned */
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002359static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002360 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002361{
bellard8df1cd02005-01-28 22:37:22 +00002362 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002363 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002364 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002365 hwaddr l = 4;
2366 hwaddr addr1;
bellard8df1cd02005-01-28 22:37:22 +00002367
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002368 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002369 if (l < 4 || !memory_access_is_direct(mr, false)) {
bellard8df1cd02005-01-28 22:37:22 +00002370 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002371 io_mem_read(mr, addr1, &val, 4);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002372#if defined(TARGET_WORDS_BIGENDIAN)
2373 if (endian == DEVICE_LITTLE_ENDIAN) {
2374 val = bswap32(val);
2375 }
2376#else
2377 if (endian == DEVICE_BIG_ENDIAN) {
2378 val = bswap32(val);
2379 }
2380#endif
bellard8df1cd02005-01-28 22:37:22 +00002381 } else {
2382 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002383 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002384 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002385 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002386 switch (endian) {
2387 case DEVICE_LITTLE_ENDIAN:
2388 val = ldl_le_p(ptr);
2389 break;
2390 case DEVICE_BIG_ENDIAN:
2391 val = ldl_be_p(ptr);
2392 break;
2393 default:
2394 val = ldl_p(ptr);
2395 break;
2396 }
bellard8df1cd02005-01-28 22:37:22 +00002397 }
2398 return val;
2399}
2400
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002401uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002402{
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002403 return ldl_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002404}
2405
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002406uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002407{
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002408 return ldl_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002409}
2410
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002411uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002412{
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002413 return ldl_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002414}
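
/* Example (editor's sketch): device emulation normally picks the _le/_be
 * variant matching the byte order of the modelled bus rather than the
 * guest's native order.  The descriptor layout here is hypothetical:
 * a little-endian 32-bit length field at offset 8. */
static uint32_t example_read_desc_len(AddressSpace *as, hwaddr desc)
{
    return ldl_le_phys(as, desc + 8);
}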
2415
bellard84b7b8e2005-11-28 21:19:04 +00002416/* warning: addr must be aligned */
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002417static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002418 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00002419{
bellard84b7b8e2005-11-28 21:19:04 +00002420 uint8_t *ptr;
2421 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002422 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002423 hwaddr l = 8;
2424 hwaddr addr1;
bellard84b7b8e2005-11-28 21:19:04 +00002425
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002426 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002427 false);
2428 if (l < 8 || !memory_access_is_direct(mr, false)) {
bellard84b7b8e2005-11-28 21:19:04 +00002429 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002430 io_mem_read(mr, addr1, &val, 8);
Paolo Bonzini968a5622013-05-24 17:58:37 +02002431#if defined(TARGET_WORDS_BIGENDIAN)
2432 if (endian == DEVICE_LITTLE_ENDIAN) {
2433 val = bswap64(val);
2434 }
2435#else
2436 if (endian == DEVICE_BIG_ENDIAN) {
2437 val = bswap64(val);
2438 }
2439#endif
bellard84b7b8e2005-11-28 21:19:04 +00002440 } else {
2441 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002442 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002443 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002444 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002445 switch (endian) {
2446 case DEVICE_LITTLE_ENDIAN:
2447 val = ldq_le_p(ptr);
2448 break;
2449 case DEVICE_BIG_ENDIAN:
2450 val = ldq_be_p(ptr);
2451 break;
2452 default:
2453 val = ldq_p(ptr);
2454 break;
2455 }
bellard84b7b8e2005-11-28 21:19:04 +00002456 }
2457 return val;
2458}
2459
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002460uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002461{
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002462 return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002463}
2464
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002465uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002466{
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002467 return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002468}
2469
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002470uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002471{
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002472 return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002473}
2474
bellardaab33092005-10-30 20:48:42 +00002475/* XXX: optimize */
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002476uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
bellardaab33092005-10-30 20:48:42 +00002477{
2478 uint8_t val;
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002479 address_space_rw(as, addr, &val, 1, 0);
bellardaab33092005-10-30 20:48:42 +00002480 return val;
2481}
2482
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002483/* warning: addr must be aligned */
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10002484static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002485 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00002486{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002487 uint8_t *ptr;
2488 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002489 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002490 hwaddr l = 2;
2491 hwaddr addr1;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002492
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10002493 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002494 false);
2495 if (l < 2 || !memory_access_is_direct(mr, false)) {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002496 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002497 io_mem_read(mr, addr1, &val, 2);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002498#if defined(TARGET_WORDS_BIGENDIAN)
2499 if (endian == DEVICE_LITTLE_ENDIAN) {
2500 val = bswap16(val);
2501 }
2502#else
2503 if (endian == DEVICE_BIG_ENDIAN) {
2504 val = bswap16(val);
2505 }
2506#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002507 } else {
2508 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002509 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002510 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002511 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002512 switch (endian) {
2513 case DEVICE_LITTLE_ENDIAN:
2514 val = lduw_le_p(ptr);
2515 break;
2516 case DEVICE_BIG_ENDIAN:
2517 val = lduw_be_p(ptr);
2518 break;
2519 default:
2520 val = lduw_p(ptr);
2521 break;
2522 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002523 }
2524 return val;
bellardaab33092005-10-30 20:48:42 +00002525}
2526
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10002527uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002528{
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10002529 return lduw_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002530}
2531
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10002532uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002533{
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10002534 return lduw_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002535}
2536
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10002537uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002538{
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10002539 return lduw_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002540}
2541
bellard8df1cd02005-01-28 22:37:22 +00002542/* warning: addr must be aligned. The ram page is not marked as dirty
2543 and the code inside is not invalidated. It is useful if the dirty
2544 bits are used to track modified PTEs */
Edgar E. Iglesias2198a122013-11-28 10:13:41 +01002545void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00002546{
bellard8df1cd02005-01-28 22:37:22 +00002547 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002548 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002549 hwaddr l = 4;
2550 hwaddr addr1;
bellard8df1cd02005-01-28 22:37:22 +00002551
Edgar E. Iglesias2198a122013-11-28 10:13:41 +01002552 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002553 true);
2554 if (l < 4 || !memory_access_is_direct(mr, true)) {
2555 io_mem_write(mr, addr1, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00002556 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002557 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00002558 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00002559 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00002560
2561 if (unlikely(in_migration)) {
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02002562 if (cpu_physical_memory_is_clean(addr1)) {
aliguori74576192008-10-06 14:02:03 +00002563 /* invalidate code */
2564 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2565 /* set dirty bit */
Juan Quintela52159192013-10-08 12:44:04 +02002566 cpu_physical_memory_set_dirty_flag(addr1,
2567 DIRTY_MEMORY_MIGRATION);
2568 cpu_physical_memory_set_dirty_flag(addr1, DIRTY_MEMORY_VGA);
aliguori74576192008-10-06 14:02:03 +00002569 }
2570 }
bellard8df1cd02005-01-28 22:37:22 +00002571 }
2572}
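
/* Example (editor's sketch): the typical caller of stl_phys_notdirty()
 * is a target MMU helper updating accessed/dirty bits in a guest PTE.
 * The function and the 0x20 "accessed" bit below are hypothetical. */
static void example_set_pte_accessed(AddressSpace *as, hwaddr pte_addr,
                                     uint32_t pte)
{
    /* Update the PTE without tripping the dirty tracking that is itself
     * being used to watch for guest-modified page tables. */
    stl_phys_notdirty(as, pte_addr, pte | 0x20);
}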
2573
2574/* warning: addr must be aligned */
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10002575static inline void stl_phys_internal(AddressSpace *as,
2576 hwaddr addr, uint32_t val,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002577 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002578{
bellard8df1cd02005-01-28 22:37:22 +00002579 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002580 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002581 hwaddr l = 4;
2582 hwaddr addr1;
bellard8df1cd02005-01-28 22:37:22 +00002583
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10002584 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002585 true);
2586 if (l < 4 || !memory_access_is_direct(mr, true)) {
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002587#if defined(TARGET_WORDS_BIGENDIAN)
2588 if (endian == DEVICE_LITTLE_ENDIAN) {
2589 val = bswap32(val);
2590 }
2591#else
2592 if (endian == DEVICE_BIG_ENDIAN) {
2593 val = bswap32(val);
2594 }
2595#endif
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002596 io_mem_write(mr, addr1, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00002597 } else {
bellard8df1cd02005-01-28 22:37:22 +00002598 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002599 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00002600 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002601 switch (endian) {
2602 case DEVICE_LITTLE_ENDIAN:
2603 stl_le_p(ptr, val);
2604 break;
2605 case DEVICE_BIG_ENDIAN:
2606 stl_be_p(ptr, val);
2607 break;
2608 default:
2609 stl_p(ptr, val);
2610 break;
2611 }
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002612 invalidate_and_set_dirty(addr1, 4);
bellard8df1cd02005-01-28 22:37:22 +00002613 }
2614}
2615
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10002616void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002617{
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10002618 stl_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002619}
2620
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10002621void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002622{
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10002623 stl_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002624}
2625
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10002626void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002627{
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10002628 stl_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002629}
2630
bellardaab33092005-10-30 20:48:42 +00002631/* XXX: optimize */
Edgar E. Iglesiasdb3be602013-12-17 15:29:06 +10002632void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00002633{
2634 uint8_t v = val;
Edgar E. Iglesiasdb3be602013-12-17 15:29:06 +10002635 address_space_rw(as, addr, &v, 1, 1);
bellardaab33092005-10-30 20:48:42 +00002636}
2637
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002638/* warning: addr must be aligned */
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10002639static inline void stw_phys_internal(AddressSpace *as,
2640 hwaddr addr, uint32_t val,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002641 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00002642{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002643 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002644 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002645 hwaddr l = 2;
2646 hwaddr addr1;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002647
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10002648 mr = address_space_translate(as, addr, &addr1, &l, true);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002649 if (l < 2 || !memory_access_is_direct(mr, true)) {
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002650#if defined(TARGET_WORDS_BIGENDIAN)
2651 if (endian == DEVICE_LITTLE_ENDIAN) {
2652 val = bswap16(val);
2653 }
2654#else
2655 if (endian == DEVICE_BIG_ENDIAN) {
2656 val = bswap16(val);
2657 }
2658#endif
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002659 io_mem_write(mr, addr1, val, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002660 } else {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002661 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002662 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002663 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002664 switch (endian) {
2665 case DEVICE_LITTLE_ENDIAN:
2666 stw_le_p(ptr, val);
2667 break;
2668 case DEVICE_BIG_ENDIAN:
2669 stw_be_p(ptr, val);
2670 break;
2671 default:
2672 stw_p(ptr, val);
2673 break;
2674 }
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002675 invalidate_and_set_dirty(addr1, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002676 }
bellardaab33092005-10-30 20:48:42 +00002677}
2678
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10002679void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002680{
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10002681 stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002682}
2683
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10002684void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002685{
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10002686 stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002687}
2688
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10002689void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002690{
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10002691 stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002692}
2693
bellardaab33092005-10-30 20:48:42 +00002694/* XXX: optimize */
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01002695void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00002696{
2697 val = tswap64(val);
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01002698 address_space_rw(as, addr, (void *) &val, 8, 1);
bellardaab33092005-10-30 20:48:42 +00002699}
2700
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01002701void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002702{
2703 val = cpu_to_le64(val);
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01002704 address_space_rw(as, addr, (void *) &val, 8, 1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002705}
2706
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01002707void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002708{
2709 val = cpu_to_be64(val);
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01002710 address_space_rw(as, addr, (void *) &val, 8, 1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002711}
2712
aliguori5e2972f2009-03-28 17:51:36 +00002713/* virtual memory access for debug (includes writing to ROM) */
Andreas Färberf17ec442013-06-29 19:40:58 +02002714int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00002715 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002716{
2717 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02002718 hwaddr phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00002719 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00002720
2721 while (len > 0) {
2722 page = addr & TARGET_PAGE_MASK;
Andreas Färberf17ec442013-06-29 19:40:58 +02002723 phys_addr = cpu_get_phys_page_debug(cpu, page);
bellard13eb76e2004-01-24 15:23:36 +00002724 /* if no physical page mapped, return an error */
2725 if (phys_addr == -1)
2726 return -1;
2727 l = (page + TARGET_PAGE_SIZE) - addr;
2728 if (l > len)
2729 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00002730 phys_addr += (addr & ~TARGET_PAGE_MASK);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10002731 if (is_write) {
2732 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
2733 } else {
2734 address_space_rw(cpu->as, phys_addr, buf, l, 0);
2735 }
bellard13eb76e2004-01-24 15:23:36 +00002736 len -= l;
2737 buf += l;
2738 addr += l;
2739 }
2740 return 0;
2741}
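
/* Example (editor's sketch): cpu_memory_rw_debug() is what a debugger
 * stub uses to patch guest code; is_write == 1 succeeds even on ROM,
 * per the comment above.  The wrapper name is invented. */
static bool example_debug_patch_byte(CPUState *cpu, target_ulong vaddr,
                                     uint8_t v)
{
    return cpu_memory_rw_debug(cpu, vaddr, &v, 1, 1) == 0;
}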
Paul Brooka68fe892010-03-01 00:08:59 +00002742#endif
bellard13eb76e2004-01-24 15:23:36 +00002743
Blue Swirl8e4a4242013-01-06 18:30:17 +00002744#if !defined(CONFIG_USER_ONLY)
2745
2746/*
2747 * A helper function for the _utterly broken_ virtio device model to find out if
2748 * it's running on a big-endian machine. Don't do this at home, kids!
2749 */
2750bool virtio_is_big_endian(void);
2751bool virtio_is_big_endian(void)
2752{
2753#if defined(TARGET_WORDS_BIGENDIAN)
2754 return true;
2755#else
2756 return false;
2757#endif
2758}
2759
2760#endif
2761
Wen Congyang76f35532012-05-07 12:04:18 +08002762#ifndef CONFIG_USER_ONLY
Avi Kivitya8170e52012-10-23 12:30:10 +02002763bool cpu_physical_memory_is_io(hwaddr phys_addr)
Wen Congyang76f35532012-05-07 12:04:18 +08002764{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002765    MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002766 hwaddr l = 1;
Wen Congyang76f35532012-05-07 12:04:18 +08002767
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002768 mr = address_space_translate(&address_space_memory,
2769 phys_addr, &phys_addr, &l, false);
Wen Congyang76f35532012-05-07 12:04:18 +08002770
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002771 return !(memory_region_is_ram(mr) ||
2772 memory_region_is_romd(mr));
Wen Congyang76f35532012-05-07 12:04:18 +08002773}
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04002774
2775void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2776{
2777 RAMBlock *block;
2778
2779 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
2780 func(block->host, block->offset, block->length, opaque);
2781 }
2782}
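
/* Example (editor's sketch): a RAMBlockIterFunc matching the call above,
 * tallying total guest RAM.  Usage:
 *     uint64_t total = 0;
 *     qemu_ram_foreach_block(example_count_ram, &total);
 */
static void example_count_ram(void *host, ram_addr_t offset,
                              ram_addr_t length, void *opaque)
{
    *(uint64_t *)opaque += length;
}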
Peter Maydellec3f8c92013-06-27 20:53:38 +01002783#endif