/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "qemu/cache-utils.h"

#include "qemu/range.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
static bool in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
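/* For example, with 64-bit physical addresses and a (typical) 12-bit
 * TARGET_PAGE_BITS this works out to (64 - 12 - 1) / 9 + 1 = 6 levels. */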

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map)
{
    unsigned i;
    uint32_t ret;

    ret = map->nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);
    for (i = 0; i < P_L2_SIZE; ++i) {
        map->nodes[ret][i].skip = 1;
        map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

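/* Recursively fill the [*index, *index + *nb) page range with 'leaf',
 * allocating intermediate nodes on demand and descending one level per
 * call until the remaining range can be covered by whole entries. */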
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map);
        p = map->nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < P_L2_SIZE; i++) {
                p[i].skip = 0;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = map->nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non-leaf page entry. Simply detect that the entry has a single
 * child, and update our entry so we can skip it and go directly to the
 * destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

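/* Walk the radix tree from 'lp', consuming 'skip' levels per step (the
 * compaction above lets one entry stand in for several levels), and return
 * the section covering 'addr', or the unassigned section if there is none. */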
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

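/* Translate 'addr' within 'as' into a target MemoryRegion and offset,
 * iterating through any IOMMUs on the path and clamping *plen so the
 * result does not cross a region (or, under Xen, page) boundary. */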
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        len = MIN(page, len);
    }

    *plen = len;
    *xlat = addr;
    return mr;
}

MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

#endif

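/* Return the CPUState with the given cpu_index, or NULL if there is none. */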
CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
{
    /* We only support one address space per cpu at the moment.  */
    assert(cpu->as == as);

    if (cpu->tcg_as_listener) {
        memory_listener_unregister(cpu->tcg_as_listener);
    } else {
        cpu->tcg_as_listener = g_new0(MemoryListener, 1);
    }
    cpu->tcg_as_listener->commit = tcg_commit;
    memory_listener_register(cpu->tcg_as_listener, as);
}
#endif

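/* Register a newly created CPU: assign the next free cpu_index, initialise
 * its breakpoint/watchpoint lists, add it to the global CPU list and hook
 * it up to savevm/migration state as appropriate. */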
void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&cpu->breakpoints);
    QTAILQ_INIT(&cpu->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    vaddr len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    vaddr len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
#endif
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
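/* Find the RAMBlock that contains 'addr', consulting the MRU cache first
 * and aborting if the offset does not belong to any registered block. */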
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    /* The list is protected by the iothread lock here.  */
    block = ram_list.mru_block;
    if (block && addr - block->offset < block->length) {
        goto found;
    }
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)block->host + (start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
                                     unsigned client)
{
    if (length == 0)
        return;
    cpu_physical_memory_clear_dirty_range(start, length, client);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }
}

static void cpu_physical_memory_set_dirty_tracking(bool enable)
{
    in_migration = enable;
}

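/* Compute the iotlb value for a TLB entry: a ram_addr (tagged with the
 * notdirty/ROM special sections) for RAM, or a section index plus offset
 * for MMIO, routing the page through the watchpoint handler when needed. */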
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - section->address_space->dispatch->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}

static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

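/* Add a MemoryRegionSection to the dispatch tree, registering any partial
 * pages at either end as subpages and the page-aligned middle as a single
 * multipage mapping. */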
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

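/* Back a RAMBlock with an mmap()ed, unlinked temporary file created under
 * 'path' (normally a hugetlbfs mount), rounding the size up to the huge
 * page size of that filesystem. */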
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        goto error;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        goto error;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(block->mr->name);
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        g_free(filename);
        goto error;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        goto error;
    }

    if (mem_prealloc) {
        os_mem_prealloc(fd, area, memory);
    }

    block->fd = fd;
    return area;

error:
    if (mem_prealloc) {
        exit(1);
    }
    return NULL;
}
#else
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    fprintf(stderr, "-mem-path not supported on this host\n");
    exit(1);
}
#endif

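/* Pick an offset in ram_addr_t space for a new block of 'size' bytes,
 * choosing the smallest gap between existing blocks that fits. */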
Alex Williamsond17b5282010-06-25 11:08:38 -06001100static ram_addr_t find_ram_offset(ram_addr_t size)
1101{
Alex Williamson04b16652010-07-02 11:13:17 -06001102 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001103 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001104
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001105 assert(size != 0); /* it would hand out same offset multiple times */
1106
Paolo Bonzinia3161032012-11-14 15:54:48 +01001107 if (QTAILQ_EMPTY(&ram_list.blocks))
Alex Williamson04b16652010-07-02 11:13:17 -06001108 return 0;
1109
Paolo Bonzinia3161032012-11-14 15:54:48 +01001110 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001111 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001112
1113 end = block->offset + block->length;
1114
Paolo Bonzinia3161032012-11-14 15:54:48 +01001115 QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001116 if (next_block->offset >= end) {
1117 next = MIN(next, next_block->offset);
1118 }
1119 }
1120 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001121 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001122 mingap = next - end;
1123 }
1124 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001125
1126 if (offset == RAM_ADDR_MAX) {
1127 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1128 (uint64_t)size);
1129 abort();
1130 }
1131
Alex Williamson04b16652010-07-02 11:13:17 -06001132 return offset;
1133}
1134
Juan Quintela652d7ec2012-07-20 10:37:54 +02001135ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001136{
Alex Williamsond17b5282010-06-25 11:08:38 -06001137 RAMBlock *block;
1138 ram_addr_t last = 0;
1139
Paolo Bonzinia3161032012-11-14 15:54:48 +01001140 QTAILQ_FOREACH(block, &ram_list.blocks, next)
Alex Williamsond17b5282010-06-25 11:08:38 -06001141 last = MAX(last, block->offset + block->length);
1142
1143 return last;
1144}
1145
Jason Baronddb97f12012-08-02 15:44:16 -04001146static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1147{
1148 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001149
 1150 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
Markus Armbruster2ff3de62013-07-04 15:09:22 +02001151 if (!qemu_opt_get_bool(qemu_get_machine_opts(),
1152 "dump-guest-core", true)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001153 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1154 if (ret) {
1155 perror("qemu_madvise");
1156 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1157 "but dump_guest_core=off specified\n");
1158 }
1159 }
1160}
1161
Hu Tao20cfe882014-04-02 15:13:26 +08001162static RAMBlock *find_ram_block(ram_addr_t addr)
Cam Macdonell84b89d72010-07-26 18:10:57 -06001163{
Hu Tao20cfe882014-04-02 15:13:26 +08001164 RAMBlock *block;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001165
Paolo Bonzinia3161032012-11-14 15:54:48 +01001166 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001167 if (block->offset == addr) {
Hu Tao20cfe882014-04-02 15:13:26 +08001168 return block;
Avi Kivityc5705a72011-12-20 15:59:12 +02001169 }
1170 }
Hu Tao20cfe882014-04-02 15:13:26 +08001171
1172 return NULL;
1173}
1174
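/* Give a RAM block its migration identifier: "<qdev path>/<name>" when a
 * device is supplied, plain "<name>" otherwise.  A duplicate identifier is
 * a fatal error. */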
1175void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1176{
1177 RAMBlock *new_block = find_ram_block(addr);
1178 RAMBlock *block;
1179
Avi Kivityc5705a72011-12-20 15:59:12 +02001180 assert(new_block);
1181 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001182
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001183 if (dev) {
1184 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001185 if (id) {
1186 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001187 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001188 }
1189 }
1190 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1191
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001192 /* This assumes the iothread lock is taken here too. */
1193 qemu_mutex_lock_ramlist();
Paolo Bonzinia3161032012-11-14 15:54:48 +01001194 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001195 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001196 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1197 new_block->idstr);
1198 abort();
1199 }
1200 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001201 qemu_mutex_unlock_ramlist();
Avi Kivityc5705a72011-12-20 15:59:12 +02001202}
1203
Hu Tao20cfe882014-04-02 15:13:26 +08001204void qemu_ram_unset_idstr(ram_addr_t addr)
1205{
1206 RAMBlock *block = find_ram_block(addr);
1207
1208 if (block) {
1209 memset(block->idstr, 0, sizeof(block->idstr));
1210 }
1211}
1212
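/* Advise the kernel that this range is a candidate for KSM page merging,
 * unless merging was disabled with the machine "mem-merge" option. */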
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001213static int memory_try_enable_merging(void *addr, size_t len)
1214{
Markus Armbruster2ff3de62013-07-04 15:09:22 +02001215 if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001216 /* disabled by the user */
1217 return 0;
1218 }
1219
1220 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1221}
1222
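/* Common tail of RAM block creation: pick an offset, allocate the host
 * memory if the caller did not supply any (or let Xen do it), insert the
 * block into the size-sorted list, grow the dirty bitmaps, and apply the
 * usual madvise/dump/KVM setup to the new range. */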
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001223static ram_addr_t ram_block_add(RAMBlock *new_block)
Avi Kivityc5705a72011-12-20 15:59:12 +02001224{
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001225 RAMBlock *block;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001226 ram_addr_t old_ram_size, new_ram_size;
1227
1228 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001229
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001230 /* This assumes the iothread lock is taken here too. */
1231 qemu_mutex_lock_ramlist();
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001232 new_block->offset = find_ram_offset(new_block->length);
1233
1234 if (!new_block->host) {
1235 if (xen_enabled()) {
1236 xen_ram_alloc(new_block->offset, new_block->length, new_block->mr);
1237 } else {
1238 new_block->host = phys_mem_alloc(new_block->length);
Markus Armbruster39228252013-07-31 15:11:11 +02001239 if (!new_block->host) {
1240 fprintf(stderr, "Cannot set up guest memory '%s': %s\n",
1241 new_block->mr->name, strerror(errno));
1242 exit(1);
1243 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001244 memory_try_enable_merging(new_block->host, new_block->length);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001245 }
1246 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001247
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001248 /* Keep the list sorted from biggest to smallest block. */
1249 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1250 if (block->length < new_block->length) {
1251 break;
1252 }
1253 }
1254 if (block) {
1255 QTAILQ_INSERT_BEFORE(block, new_block, next);
1256 } else {
1257 QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1258 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001259 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001260
Umesh Deshpandef798b072011-08-18 11:41:17 -07001261 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001262 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001263
Juan Quintela2152f5c2013-10-08 13:52:02 +02001264 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1265
1266 if (new_ram_size > old_ram_size) {
Juan Quintela1ab4c8c2013-10-08 16:14:39 +02001267 int i;
1268 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1269 ram_list.dirty_memory[i] =
1270 bitmap_zero_extend(ram_list.dirty_memory[i],
1271 old_ram_size, new_ram_size);
1272 }
Juan Quintela2152f5c2013-10-08 13:52:02 +02001273 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001274 cpu_physical_memory_set_dirty_range(new_block->offset, new_block->length);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001275
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001276 qemu_ram_setup_dump(new_block->host, new_block->length);
1277 qemu_madvise(new_block->host, new_block->length, QEMU_MADV_HUGEPAGE);
1278 qemu_madvise(new_block->host, new_block->length, QEMU_MADV_DONTFORK);
Jason Baronddb97f12012-08-02 15:44:16 -04001279
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001280 if (kvm_enabled()) {
1281 kvm_setup_guest_memory(new_block->host, new_block->length);
1282 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001283
1284 return new_block->offset;
1285}
1286
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001287ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
1288 const char *mem_path)
1289{
1290 RAMBlock *new_block;
1291
1292 if (xen_enabled()) {
1293 fprintf(stderr, "-mem-path not supported with Xen\n");
1294 exit(1);
1295 }
1296
1297 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1298 /*
1299 * file_ram_alloc() needs to allocate just like
1300 * phys_mem_alloc, but we haven't bothered to provide
1301 * a hook there.
1302 */
1303 fprintf(stderr,
1304 "-mem-path not supported with this accelerator\n");
1305 exit(1);
1306 }
1307
1308 size = TARGET_PAGE_ALIGN(size);
1309 new_block = g_malloc0(sizeof(*new_block));
1310 new_block->mr = mr;
1311 new_block->length = size;
1312 new_block->host = file_ram_alloc(new_block, size, mem_path);
1313 return ram_block_add(new_block);
1314}
1315
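/* Register a RAM block backed by a caller-provided host buffer.  A non-NULL
 * @host sets RAM_PREALLOC_MASK so the block is never unmapped or remapped
 * by this file. */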
1316ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1317 MemoryRegion *mr)
1318{
1319 RAMBlock *new_block;
1320
1321 size = TARGET_PAGE_ALIGN(size);
1322 new_block = g_malloc0(sizeof(*new_block));
1323 new_block->mr = mr;
1324 new_block->length = size;
1325 new_block->fd = -1;
1326 new_block->host = host;
1327 if (host) {
1328 new_block->flags |= RAM_PREALLOC_MASK;
1329 }
1330 return ram_block_add(new_block);
1331}
1332
Avi Kivityc5705a72011-12-20 15:59:12 +02001333ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
pbrook94a6b542009-04-11 17:15:54 +00001334{
Avi Kivityc5705a72011-12-20 15:59:12 +02001335 return qemu_ram_alloc_from_ptr(size, NULL, mr);
pbrook94a6b542009-04-11 17:15:54 +00001336}
bellarde9a1ab12007-02-08 23:08:38 +00001337
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001338void qemu_ram_free_from_ptr(ram_addr_t addr)
1339{
1340 RAMBlock *block;
1341
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001342 /* This assumes the iothread lock is taken here too. */
1343 qemu_mutex_lock_ramlist();
Paolo Bonzinia3161032012-11-14 15:54:48 +01001344 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001345 if (addr == block->offset) {
Paolo Bonzinia3161032012-11-14 15:54:48 +01001346 QTAILQ_REMOVE(&ram_list.blocks, block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001347 ram_list.mru_block = NULL;
Umesh Deshpandef798b072011-08-18 11:41:17 -07001348 ram_list.version++;
Anthony Liguori7267c092011-08-20 22:09:37 -05001349 g_free(block);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001350 break;
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001351 }
1352 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001353 qemu_mutex_unlock_ramlist();
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001354}
1355
Anthony Liguoric227f092009-10-01 16:12:16 -05001356void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00001357{
Alex Williamson04b16652010-07-02 11:13:17 -06001358 RAMBlock *block;
1359
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001360 /* This assumes the iothread lock is taken here too. */
1361 qemu_mutex_lock_ramlist();
Paolo Bonzinia3161032012-11-14 15:54:48 +01001362 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001363 if (addr == block->offset) {
Paolo Bonzinia3161032012-11-14 15:54:48 +01001364 QTAILQ_REMOVE(&ram_list.blocks, block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001365 ram_list.mru_block = NULL;
Umesh Deshpandef798b072011-08-18 11:41:17 -07001366 ram_list.version++;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001367 if (block->flags & RAM_PREALLOC_MASK) {
1368 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001369 } else if (xen_enabled()) {
1370 xen_invalidate_map_cache_entry(block->host);
Stefan Weil089f3f72013-09-18 07:48:15 +02001371#ifndef _WIN32
Markus Armbruster3435f392013-07-31 15:11:07 +02001372 } else if (block->fd >= 0) {
1373 munmap(block->host, block->length);
1374 close(block->fd);
Stefan Weil089f3f72013-09-18 07:48:15 +02001375#endif
Alex Williamson04b16652010-07-02 11:13:17 -06001376 } else {
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001377 qemu_anon_ram_free(block->host, block->length);
Alex Williamson04b16652010-07-02 11:13:17 -06001378 }
Anthony Liguori7267c092011-08-20 22:09:37 -05001379 g_free(block);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001380 break;
Alex Williamson04b16652010-07-02 11:13:17 -06001381 }
1382 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001383 qemu_mutex_unlock_ramlist();
Alex Williamson04b16652010-07-02 11:13:17 -06001384
bellarde9a1ab12007-02-08 23:08:38 +00001385}
1386
Huang Yingcd19cfa2011-03-02 08:56:19 +01001387#ifndef _WIN32
1388void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1389{
1390 RAMBlock *block;
1391 ram_addr_t offset;
1392 int flags;
1393 void *area, *vaddr;
1394
Paolo Bonzinia3161032012-11-14 15:54:48 +01001395 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001396 offset = addr - block->offset;
1397 if (offset < block->length) {
1398 vaddr = block->host + offset;
1399 if (block->flags & RAM_PREALLOC_MASK) {
1400 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001401 } else if (xen_enabled()) {
1402 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001403 } else {
1404 flags = MAP_FIXED;
1405 munmap(vaddr, length);
Markus Armbruster3435f392013-07-31 15:11:07 +02001406 if (block->fd >= 0) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001407#ifdef MAP_POPULATE
Markus Armbruster3435f392013-07-31 15:11:07 +02001408 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1409 MAP_PRIVATE;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001410#else
Markus Armbruster3435f392013-07-31 15:11:07 +02001411 flags |= MAP_PRIVATE;
Huang Yingcd19cfa2011-03-02 08:56:19 +01001412#endif
Markus Armbruster3435f392013-07-31 15:11:07 +02001413 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1414 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001415 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001416 /*
1417 * Remap needs to match alloc. Accelerators that
1418 * set phys_mem_alloc never remap. If they did,
1419 * we'd need a remap hook here.
1420 */
1421 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1422
Huang Yingcd19cfa2011-03-02 08:56:19 +01001423 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1424 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1425 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001426 }
1427 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001428 fprintf(stderr, "Could not remap addr: "
1429 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001430 length, addr);
1431 exit(1);
1432 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001433 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001434 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001435 }
1436 return;
1437 }
1438 }
1439}
1440#endif /* !_WIN32 */
1441
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001442/* Return a host pointer to ram allocated with qemu_ram_alloc.
1443 With the exception of the softmmu code in this file, this should
1444 only be used for local memory (e.g. video ram) that the device owns,
1445 and knows it isn't going to access beyond the end of the block.
1446
1447 It should not be used for general purpose DMA.
1448 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1449 */
1450void *qemu_get_ram_ptr(ram_addr_t addr)
1451{
1452 RAMBlock *block = qemu_get_ram_block(addr);
1453
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001454 if (xen_enabled()) {
1455 /* We need to check if the requested address is in the RAM
1456 * because we don't want to map the entire memory in QEMU.
1457 * In that case just map until the end of the page.
1458 */
1459 if (block->offset == 0) {
1460 return xen_map_cache(addr, 0, 0);
1461 } else if (block->host == NULL) {
1462 block->host =
1463 xen_map_cache(block->offset, block->length, 1);
1464 }
1465 }
1466 return block->host + (addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00001467}
1468
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001469/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
1470 * but takes a size argument */
Peter Maydellcb85f7a2013-07-08 09:44:04 +01001471static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001472{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001473 if (*size == 0) {
1474 return NULL;
1475 }
Jan Kiszka868bb332011-06-21 22:59:09 +02001476 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001477 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02001478 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001479 RAMBlock *block;
1480
Paolo Bonzinia3161032012-11-14 15:54:48 +01001481 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001482 if (addr - block->offset < block->length) {
1483 if (addr - block->offset + *size > block->length)
1484 *size = block->length - addr + block->offset;
1485 return block->host + (addr - block->offset);
1486 }
1487 }
1488
1489 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1490 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001491 }
1492}
1493
Paolo Bonzini7443b432013-06-03 12:44:02 +02001494/* Some of the softmmu routines need to translate from a host pointer
1495 (typically a TLB entry) back to a ram offset. */
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001496MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00001497{
pbrook94a6b542009-04-11 17:15:54 +00001498 RAMBlock *block;
1499 uint8_t *host = ptr;
1500
Jan Kiszka868bb332011-06-21 22:59:09 +02001501 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001502 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001503 return qemu_get_ram_block(*ram_addr)->mr;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001504 }
1505
Paolo Bonzini23887b72013-05-06 14:28:39 +02001506 block = ram_list.mru_block;
1507 if (block && block->host && host - block->host < block->length) {
1508 goto found;
1509 }
1510
Paolo Bonzinia3161032012-11-14 15:54:48 +01001511 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001512 /* This case happens when the block is not mapped. */
1513 if (block->host == NULL) {
1514 continue;
1515 }
Alex Williamsonf471a172010-06-11 11:11:42 -06001516 if (host - block->host < block->length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001517 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001518 }
pbrook94a6b542009-04-11 17:15:54 +00001519 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001520
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001521 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001522
1523found:
1524 *ram_addr = block->offset + (host - block->host);
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001525 return block->mr;
Marcelo Tosattie8902612010-10-11 15:31:19 -03001526}
Alex Williamsonf471a172010-06-11 11:11:42 -06001527
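/* Slow-path write handler for pages that contain translated code: invalidate
 * the affected translation blocks, perform the store, and mark the page dirty
 * for migration and VGA tracking. */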
Avi Kivitya8170e52012-10-23 12:30:10 +02001528static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001529 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001530{
Juan Quintela52159192013-10-08 12:44:04 +02001531 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001532 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00001533 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001534 switch (size) {
1535 case 1:
1536 stb_p(qemu_get_ram_ptr(ram_addr), val);
1537 break;
1538 case 2:
1539 stw_p(qemu_get_ram_ptr(ram_addr), val);
1540 break;
1541 case 4:
1542 stl_p(qemu_get_ram_ptr(ram_addr), val);
1543 break;
1544 default:
1545 abort();
1546 }
Juan Quintela52159192013-10-08 12:44:04 +02001547 cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_MIGRATION);
1548 cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_VGA);
bellardf23db162005-08-21 19:12:28 +00001549 /* we remove the notdirty callback only if the code has been
1550 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02001551 if (!cpu_physical_memory_is_clean(ram_addr)) {
Andreas Färber4917cf42013-05-27 05:17:50 +02001552 CPUArchState *env = current_cpu->env_ptr;
Andreas Färber93afead2013-08-26 03:41:01 +02001553 tlb_set_dirty(env, current_cpu->mem_io_vaddr);
Andreas Färber4917cf42013-05-27 05:17:50 +02001554 }
bellard1ccde1c2004-02-06 19:46:14 +00001555}
1556
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001557static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1558 unsigned size, bool is_write)
1559{
1560 return is_write;
1561}
1562
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001563static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001564 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001565 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001566 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00001567};
1568
pbrook0f459d12008-06-09 00:20:13 +00001569/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00001570static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00001571{
Andreas Färber93afead2013-08-26 03:41:01 +02001572 CPUState *cpu = current_cpu;
1573 CPUArchState *env = cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00001574 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00001575 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00001576 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00001577 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00001578
Andreas Färberff4700b2013-08-26 18:23:18 +02001579 if (cpu->watchpoint_hit) {
aliguori06d55cc2008-11-18 20:24:06 +00001580 /* We re-entered the check after replacing the TB. Now raise
 1581 * the debug interrupt so that it will trigger after the
1582 * current instruction. */
Andreas Färber93afead2013-08-26 03:41:01 +02001583 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00001584 return;
1585 }
Andreas Färber93afead2013-08-26 03:41:01 +02001586 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Andreas Färberff4700b2013-08-26 18:23:18 +02001587 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001588 if ((vaddr == (wp->vaddr & len_mask) ||
1589 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00001590 wp->flags |= BP_WATCHPOINT_HIT;
Andreas Färberff4700b2013-08-26 18:23:18 +02001591 if (!cpu->watchpoint_hit) {
1592 cpu->watchpoint_hit = wp;
Andreas Färber239c51a2013-09-01 17:12:23 +02001593 tb_check_watchpoint(cpu);
aliguori6e140f22008-11-18 20:37:55 +00001594 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
Andreas Färber27103422013-08-26 08:31:06 +02001595 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +02001596 cpu_loop_exit(cpu);
aliguori6e140f22008-11-18 20:37:55 +00001597 } else {
1598 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
Andreas Färber648f0342013-09-01 17:43:17 +02001599 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
Andreas Färber0ea8cb82013-09-03 02:12:23 +02001600 cpu_resume_from_signal(cpu, NULL);
aliguori6e140f22008-11-18 20:37:55 +00001601 }
aliguori06d55cc2008-11-18 20:24:06 +00001602 }
aliguori6e140f22008-11-18 20:37:55 +00001603 } else {
1604 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00001605 }
1606 }
1607}
1608
pbrook6658ffb2007-03-16 23:58:11 +00001609/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1610 so these check for a hit then pass through to the normal out-of-line
1611 phys routines. */
Avi Kivitya8170e52012-10-23 12:30:10 +02001612static uint64_t watch_mem_read(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001613 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001614{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001615 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
1616 switch (size) {
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10001617 case 1: return ldub_phys(&address_space_memory, addr);
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10001618 case 2: return lduw_phys(&address_space_memory, addr);
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01001619 case 4: return ldl_phys(&address_space_memory, addr);
Avi Kivity1ec9b902012-01-02 12:47:48 +02001620 default: abort();
1621 }
pbrook6658ffb2007-03-16 23:58:11 +00001622}
1623
Avi Kivitya8170e52012-10-23 12:30:10 +02001624static void watch_mem_write(void *opaque, hwaddr addr,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001625 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00001626{
Avi Kivity1ec9b902012-01-02 12:47:48 +02001627 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
1628 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04001629 case 1:
Edgar E. Iglesiasdb3be602013-12-17 15:29:06 +10001630 stb_phys(&address_space_memory, addr, val);
Max Filippov67364152012-01-29 00:01:40 +04001631 break;
1632 case 2:
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10001633 stw_phys(&address_space_memory, addr, val);
Max Filippov67364152012-01-29 00:01:40 +04001634 break;
1635 case 4:
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10001636 stl_phys(&address_space_memory, addr, val);
Max Filippov67364152012-01-29 00:01:40 +04001637 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02001638 default: abort();
1639 }
pbrook6658ffb2007-03-16 23:58:11 +00001640}
1641
Avi Kivity1ec9b902012-01-02 12:47:48 +02001642static const MemoryRegionOps watch_mem_ops = {
1643 .read = watch_mem_read,
1644 .write = watch_mem_write,
1645 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00001646};
pbrook6658ffb2007-03-16 23:58:11 +00001647
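/* Subpages split a single target page between several memory region
 * sections.  Accesses are simply re-issued on the owning address space at
 * subpage->base + addr, so they land in whichever section really covers
 * that offset. */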
Avi Kivitya8170e52012-10-23 12:30:10 +02001648static uint64_t subpage_read(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001649 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001650{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001651 subpage_t *subpage = opaque;
1652 uint8_t buf[4];
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001653
blueswir1db7b5422007-05-26 17:36:03 +00001654#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001655 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001656 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00001657#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001658 address_space_read(subpage->as, addr + subpage->base, buf, len);
1659 switch (len) {
1660 case 1:
1661 return ldub_p(buf);
1662 case 2:
1663 return lduw_p(buf);
1664 case 4:
1665 return ldl_p(buf);
1666 default:
1667 abort();
1668 }
blueswir1db7b5422007-05-26 17:36:03 +00001669}
1670
Avi Kivitya8170e52012-10-23 12:30:10 +02001671static void subpage_write(void *opaque, hwaddr addr,
Avi Kivity70c68e42012-01-02 12:32:48 +02001672 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00001673{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001674 subpage_t *subpage = opaque;
1675 uint8_t buf[4];
1676
blueswir1db7b5422007-05-26 17:36:03 +00001677#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001678 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001679 " value %"PRIx64"\n",
1680 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00001681#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001682 switch (len) {
1683 case 1:
1684 stb_p(buf, value);
1685 break;
1686 case 2:
1687 stw_p(buf, value);
1688 break;
1689 case 4:
1690 stl_p(buf, value);
1691 break;
1692 default:
1693 abort();
1694 }
1695 address_space_write(subpage->as, addr + subpage->base, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00001696}
1697
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001698static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08001699 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001700{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001701 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001702#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001703 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001704 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001705#endif
1706
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001707 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08001708 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001709}
1710
Avi Kivity70c68e42012-01-02 12:32:48 +02001711static const MemoryRegionOps subpage_ops = {
1712 .read = subpage_read,
1713 .write = subpage_write,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02001714 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02001715 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00001716};
1717
Anthony Liguoric227f092009-10-01 16:12:16 -05001718static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02001719 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00001720{
1721 int idx, eidx;
1722
1723 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1724 return -1;
1725 idx = SUBPAGE_IDX(start);
1726 eidx = SUBPAGE_IDX(end);
1727#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001728 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
1729 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00001730#endif
blueswir1db7b5422007-05-26 17:36:03 +00001731 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02001732 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00001733 }
1734
1735 return 0;
1736}
1737
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001738static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00001739{
Anthony Liguoric227f092009-10-01 16:12:16 -05001740 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00001741
Anthony Liguori7267c092011-08-20 22:09:37 -05001742 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00001743
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001744 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00001745 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001746 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Avi Kivity70c68e42012-01-02 12:32:48 +02001747 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02001748 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00001749#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001750 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
1751 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00001752#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02001753 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00001754
1755 return mmio;
1756}
1757
Peter Crosthwaitea656e222014-06-02 19:08:44 -07001758static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
1759 MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02001760{
Peter Crosthwaitea656e222014-06-02 19:08:44 -07001761 assert(as);
Avi Kivity5312bd82012-02-12 18:32:55 +02001762 MemoryRegionSection section = {
Peter Crosthwaitea656e222014-06-02 19:08:44 -07001763 .address_space = as,
Avi Kivity5312bd82012-02-12 18:32:55 +02001764 .mr = mr,
1765 .offset_within_address_space = 0,
1766 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001767 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02001768 };
1769
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001770 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02001771}
1772
Edgar E. Iglesias77717092013-11-07 19:55:56 +01001773MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02001774{
Edgar E. Iglesias77717092013-11-07 19:55:56 +01001775 return as->dispatch->map.sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02001776}
1777
Avi Kivitye9179ce2009-06-14 11:38:52 +03001778static void io_mem_init(void)
1779{
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001780 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, "rom", UINT64_MAX);
1781 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001782 "unassigned", UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001783 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001784 "notdirty", UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04001785 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001786 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03001787}
1788
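/* Start of a memory topology change: build the next AddressSpaceDispatch and
 * pre-register the sections whose fixed indices (PHYS_SECTION_*) the TLB
 * code relies on. */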
Avi Kivityac1970f2012-10-03 16:22:53 +02001789static void mem_begin(MemoryListener *listener)
1790{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001791 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001792 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
1793 uint16_t n;
1794
Peter Crosthwaitea656e222014-06-02 19:08:44 -07001795 n = dummy_section(&d->map, as, &io_mem_unassigned);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001796 assert(n == PHYS_SECTION_UNASSIGNED);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07001797 n = dummy_section(&d->map, as, &io_mem_notdirty);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001798 assert(n == PHYS_SECTION_NOTDIRTY);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07001799 n = dummy_section(&d->map, as, &io_mem_rom);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001800 assert(n == PHYS_SECTION_ROM);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07001801 n = dummy_section(&d->map, as, &io_mem_watch);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001802 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02001803
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02001804 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02001805 d->as = as;
1806 as->next_dispatch = d;
1807}
1808
1809static void mem_commit(MemoryListener *listener)
1810{
1811 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02001812 AddressSpaceDispatch *cur = as->dispatch;
1813 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02001814
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001815 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02001816
Paolo Bonzini0475d942013-05-29 12:28:21 +02001817 as->dispatch = next;
Avi Kivityac1970f2012-10-03 16:22:53 +02001818
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001819 if (cur) {
1820 phys_sections_free(&cur->map);
1821 g_free(cur);
1822 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001823}
1824
Avi Kivity1d711482012-10-02 18:54:45 +02001825static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02001826{
Andreas Färber182735e2013-05-29 22:29:20 +02001827 CPUState *cpu;
Avi Kivity117712c2012-02-12 21:23:17 +02001828
1829 /* since each CPU stores ram addresses in its TLB cache, we must
1830 reset the modified entries */
1831 /* XXX: slow ! */
Andreas Färberbdc44642013-06-24 23:50:24 +02001832 CPU_FOREACH(cpu) {
Edgar E. Iglesias33bde2e2013-11-21 19:06:30 +01001833 /* FIXME: Disentangle the cpu.h circular files deps so we can
1834 directly get the right CPU from listener. */
1835 if (cpu->tcg_as_listener != listener) {
1836 continue;
1837 }
Andreas Färber00c8cb02013-09-04 02:19:44 +02001838 tlb_flush(cpu, 1);
Avi Kivity117712c2012-02-12 21:23:17 +02001839 }
Avi Kivity50c1e142012-02-08 21:36:02 +02001840}
1841
Avi Kivity93632742012-02-08 16:54:16 +02001842static void core_log_global_start(MemoryListener *listener)
1843{
Juan Quintela981fdf22013-10-10 11:54:09 +02001844 cpu_physical_memory_set_dirty_tracking(true);
Avi Kivity93632742012-02-08 16:54:16 +02001845}
1846
1847static void core_log_global_stop(MemoryListener *listener)
1848{
Juan Quintela981fdf22013-10-10 11:54:09 +02001849 cpu_physical_memory_set_dirty_tracking(false);
Avi Kivity93632742012-02-08 16:54:16 +02001850}
1851
Avi Kivity93632742012-02-08 16:54:16 +02001852static MemoryListener core_memory_listener = {
Avi Kivity93632742012-02-08 16:54:16 +02001853 .log_global_start = core_log_global_start,
1854 .log_global_stop = core_log_global_stop,
Avi Kivityac1970f2012-10-03 16:22:53 +02001855 .priority = 1,
Avi Kivity93632742012-02-08 16:54:16 +02001856};
1857
Avi Kivityac1970f2012-10-03 16:22:53 +02001858void address_space_init_dispatch(AddressSpace *as)
1859{
Paolo Bonzini00752702013-05-29 12:13:54 +02001860 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001861 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02001862 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02001863 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02001864 .region_add = mem_add,
1865 .region_nop = mem_add,
1866 .priority = 0,
1867 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001868 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02001869}
1870
Avi Kivity83f3c252012-10-07 12:59:55 +02001871void address_space_destroy_dispatch(AddressSpace *as)
1872{
1873 AddressSpaceDispatch *d = as->dispatch;
1874
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001875 memory_listener_unregister(&as->dispatch_listener);
Avi Kivity83f3c252012-10-07 12:59:55 +02001876 g_free(d);
1877 as->dispatch = NULL;
1878}
1879
Avi Kivity62152b82011-07-26 14:26:14 +03001880static void memory_map_init(void)
1881{
Anthony Liguori7267c092011-08-20 22:09:37 -05001882 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01001883
Paolo Bonzini57271d62013-11-07 17:14:37 +01001884 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00001885 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03001886
Anthony Liguori7267c092011-08-20 22:09:37 -05001887 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02001888 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
1889 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00001890 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity93632742012-02-08 16:54:16 +02001891
Avi Kivityf6790af2012-10-02 20:13:51 +02001892 memory_listener_register(&core_memory_listener, &address_space_memory);
Avi Kivity62152b82011-07-26 14:26:14 +03001893}
1894
1895MemoryRegion *get_system_memory(void)
1896{
1897 return system_memory;
1898}
1899
Avi Kivity309cb472011-08-08 16:09:03 +03001900MemoryRegion *get_system_io(void)
1901{
1902 return system_io;
1903}
1904
pbrooke2eef172008-06-08 01:09:01 +00001905#endif /* !defined(CONFIG_USER_ONLY) */
1906
bellard13eb76e2004-01-24 15:23:36 +00001907/* physical memory access (slow version, mainly for debug) */
1908#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02001909int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00001910 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00001911{
1912 int l, flags;
1913 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00001914 void * p;
bellard13eb76e2004-01-24 15:23:36 +00001915
1916 while (len > 0) {
1917 page = addr & TARGET_PAGE_MASK;
1918 l = (page + TARGET_PAGE_SIZE) - addr;
1919 if (l > len)
1920 l = len;
1921 flags = page_get_flags(page);
1922 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00001923 return -1;
bellard13eb76e2004-01-24 15:23:36 +00001924 if (is_write) {
1925 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00001926 return -1;
bellard579a97f2007-11-11 14:26:47 +00001927 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001928 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00001929 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001930 memcpy(p, buf, l);
1931 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00001932 } else {
1933 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00001934 return -1;
bellard579a97f2007-11-11 14:26:47 +00001935 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00001936 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00001937 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00001938 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00001939 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00001940 }
1941 len -= l;
1942 buf += l;
1943 addr += l;
1944 }
Paul Brooka68fe892010-03-01 00:08:59 +00001945 return 0;
bellard13eb76e2004-01-24 15:23:36 +00001946}
bellard8df1cd02005-01-28 22:37:22 +00001947
bellard13eb76e2004-01-24 15:23:36 +00001948#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001949
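/* After a direct write to guest RAM: drop any code translated from the range,
 * set the VGA and migration dirty flags, and notify Xen. */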
Avi Kivitya8170e52012-10-23 12:30:10 +02001950static void invalidate_and_set_dirty(hwaddr addr,
1951 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001952{
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02001953 if (cpu_physical_memory_is_clean(addr)) {
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001954 /* invalidate code */
1955 tb_invalidate_phys_page_range(addr, addr + length, 0);
1956 /* set dirty bit */
Juan Quintela52159192013-10-08 12:44:04 +02001957 cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_VGA);
1958 cpu_physical_memory_set_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001959 }
Anthony PERARDe2269392012-10-03 13:49:22 +00001960 xen_modified_memory(addr, length);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00001961}
1962
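/* Clamp an MMIO access to what the region can take: at most
 * valid.max_access_size (4 if unspecified), no wider than the alignment of
 * the address unless the region handles unaligned accesses, and rounded
 * down to a power of two. */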
Richard Henderson23326162013-07-08 14:55:59 -07001963static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02001964{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02001965 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07001966
1967 /* Regions are assumed to support 1-4 byte accesses unless
1968 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07001969 if (access_size_max == 0) {
1970 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02001971 }
Richard Henderson23326162013-07-08 14:55:59 -07001972
1973 /* Bound the maximum access by the alignment of the address. */
1974 if (!mr->ops->impl.unaligned) {
1975 unsigned align_size_max = addr & -addr;
1976 if (align_size_max != 0 && align_size_max < access_size_max) {
1977 access_size_max = align_size_max;
1978 }
1979 }
1980
1981 /* Don't attempt accesses larger than the maximum. */
1982 if (l > access_size_max) {
1983 l = access_size_max;
1984 }
Paolo Bonzini098178f2013-07-29 14:27:39 +02001985 if (l & (l - 1)) {
1986 l = 1 << (qemu_fls(l) - 1);
1987 }
Richard Henderson23326162013-07-08 14:55:59 -07001988
1989 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02001990}
1991
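/* Slow-path copy between a host buffer and an address space: RAM sections
 * are handled with memcpy through qemu_get_ram_ptr(), MMIO sections go
 * through io_mem_read()/io_mem_write() in chunks sized by
 * memory_access_size(). */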
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02001992bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02001993 int len, bool is_write)
bellard13eb76e2004-01-24 15:23:36 +00001994{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001995 hwaddr l;
bellard13eb76e2004-01-24 15:23:36 +00001996 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001997 uint64_t val;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02001998 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02001999 MemoryRegion *mr;
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002000 bool error = false;
ths3b46e622007-09-17 08:09:54 +00002001
bellard13eb76e2004-01-24 15:23:36 +00002002 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002003 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002004 mr = address_space_translate(as, addr, &addr1, &l, is_write);
ths3b46e622007-09-17 08:09:54 +00002005
bellard13eb76e2004-01-24 15:23:36 +00002006 if (is_write) {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002007 if (!memory_access_is_direct(mr, is_write)) {
2008 l = memory_access_size(mr, l, addr1);
Andreas Färber4917cf42013-05-27 05:17:50 +02002009 /* XXX: could force current_cpu to NULL to avoid
bellard6a00d602005-11-21 23:25:50 +00002010 potential bugs */
Richard Henderson23326162013-07-08 14:55:59 -07002011 switch (l) {
2012 case 8:
2013 /* 64 bit write access */
2014 val = ldq_p(buf);
2015 error |= io_mem_write(mr, addr1, val, 8);
2016 break;
2017 case 4:
bellard1c213d12005-09-03 10:49:04 +00002018 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002019 val = ldl_p(buf);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002020 error |= io_mem_write(mr, addr1, val, 4);
Richard Henderson23326162013-07-08 14:55:59 -07002021 break;
2022 case 2:
bellard1c213d12005-09-03 10:49:04 +00002023 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002024 val = lduw_p(buf);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002025 error |= io_mem_write(mr, addr1, val, 2);
Richard Henderson23326162013-07-08 14:55:59 -07002026 break;
2027 case 1:
bellard1c213d12005-09-03 10:49:04 +00002028 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002029 val = ldub_p(buf);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002030 error |= io_mem_write(mr, addr1, val, 1);
Richard Henderson23326162013-07-08 14:55:59 -07002031 break;
2032 default:
2033 abort();
bellard13eb76e2004-01-24 15:23:36 +00002034 }
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02002035 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002036 addr1 += memory_region_get_ram_addr(mr);
bellard13eb76e2004-01-24 15:23:36 +00002037 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002038 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00002039 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002040 invalidate_and_set_dirty(addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002041 }
2042 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002043 if (!memory_access_is_direct(mr, is_write)) {
bellard13eb76e2004-01-24 15:23:36 +00002044 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002045 l = memory_access_size(mr, l, addr1);
Richard Henderson23326162013-07-08 14:55:59 -07002046 switch (l) {
2047 case 8:
2048 /* 64 bit read access */
2049 error |= io_mem_read(mr, addr1, &val, 8);
2050 stq_p(buf, val);
2051 break;
2052 case 4:
bellard13eb76e2004-01-24 15:23:36 +00002053 /* 32 bit read access */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002054 error |= io_mem_read(mr, addr1, &val, 4);
bellardc27004e2005-01-03 23:35:10 +00002055 stl_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002056 break;
2057 case 2:
bellard13eb76e2004-01-24 15:23:36 +00002058 /* 16 bit read access */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002059 error |= io_mem_read(mr, addr1, &val, 2);
bellardc27004e2005-01-03 23:35:10 +00002060 stw_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002061 break;
2062 case 1:
bellard1c213d12005-09-03 10:49:04 +00002063 /* 8 bit read access */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002064 error |= io_mem_read(mr, addr1, &val, 1);
bellardc27004e2005-01-03 23:35:10 +00002065 stb_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002066 break;
2067 default:
2068 abort();
bellard13eb76e2004-01-24 15:23:36 +00002069 }
2070 } else {
2071 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002072 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
Avi Kivityf3705d52012-03-08 16:16:34 +02002073 memcpy(buf, ptr, l);
bellard13eb76e2004-01-24 15:23:36 +00002074 }
2075 }
2076 len -= l;
2077 buf += l;
2078 addr += l;
2079 }
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002080
2081 return error;
bellard13eb76e2004-01-24 15:23:36 +00002082}
bellard8df1cd02005-01-28 22:37:22 +00002083
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002084bool address_space_write(AddressSpace *as, hwaddr addr,
Avi Kivityac1970f2012-10-03 16:22:53 +02002085 const uint8_t *buf, int len)
2086{
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002087 return address_space_rw(as, addr, (uint8_t *)buf, len, true);
Avi Kivityac1970f2012-10-03 16:22:53 +02002088}
2089
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002090bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002091{
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002092 return address_space_rw(as, addr, buf, len, false);
Avi Kivityac1970f2012-10-03 16:22:53 +02002093}
2094
2095
Avi Kivitya8170e52012-10-23 12:30:10 +02002096void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002097 int len, int is_write)
2098{
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002099 address_space_rw(&address_space_memory, addr, buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002100}
2101
Alexander Graf582b55a2013-12-11 14:17:44 +01002102enum write_rom_type {
2103 WRITE_DATA,
2104 FLUSH_CACHE,
2105};
2106
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002107static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
Alexander Graf582b55a2013-12-11 14:17:44 +01002108 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
bellardd0ecd2a2006-04-23 17:14:48 +00002109{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002110 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002111 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002112 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002113 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002114
bellardd0ecd2a2006-04-23 17:14:48 +00002115 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002116 l = len;
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002117 mr = address_space_translate(as, addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002118
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002119 if (!(memory_region_is_ram(mr) ||
2120 memory_region_is_romd(mr))) {
bellardd0ecd2a2006-04-23 17:14:48 +00002121 /* do nothing */
2122 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002123 addr1 += memory_region_get_ram_addr(mr);
bellardd0ecd2a2006-04-23 17:14:48 +00002124 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002125 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf582b55a2013-12-11 14:17:44 +01002126 switch (type) {
2127 case WRITE_DATA:
2128 memcpy(ptr, buf, l);
2129 invalidate_and_set_dirty(addr1, l);
2130 break;
2131 case FLUSH_CACHE:
2132 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2133 break;
2134 }
bellardd0ecd2a2006-04-23 17:14:48 +00002135 }
2136 len -= l;
2137 buf += l;
2138 addr += l;
2139 }
2140}
2141
Alexander Graf582b55a2013-12-11 14:17:44 +01002142/* used for ROM loading : can write in RAM and ROM */
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002143void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
Alexander Graf582b55a2013-12-11 14:17:44 +01002144 const uint8_t *buf, int len)
2145{
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002146 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
Alexander Graf582b55a2013-12-11 14:17:44 +01002147}
2148
2149void cpu_flush_icache_range(hwaddr start, int len)
2150{
2151 /*
2152 * This function should do the same thing as an icache flush that was
2153 * triggered from within the guest. For TCG we are always cache coherent,
2154 * so there is no need to flush anything. For KVM / Xen we need to flush
2155 * the host's instruction cache at least.
2156 */
2157 if (tcg_enabled()) {
2158 return;
2159 }
2160
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002161 cpu_physical_memory_write_rom_internal(&address_space_memory,
2162 start, NULL, len, FLUSH_CACHE);
Alexander Graf582b55a2013-12-11 14:17:44 +01002163}
2164
aliguori6d16c2f2009-01-22 16:59:11 +00002165typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002166 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002167 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002168 hwaddr addr;
2169 hwaddr len;
aliguori6d16c2f2009-01-22 16:59:11 +00002170} BounceBuffer;
2171
2172static BounceBuffer bounce;
2173
aliguoriba223c22009-01-22 16:59:16 +00002174typedef struct MapClient {
2175 void *opaque;
2176 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00002177 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002178} MapClient;
2179
Blue Swirl72cf2d42009-09-12 07:36:22 +00002180static QLIST_HEAD(map_client_list, MapClient) map_client_list
2181 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002182
2183void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2184{
Anthony Liguori7267c092011-08-20 22:09:37 -05002185 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00002186
2187 client->opaque = opaque;
2188 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002189 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00002190 return client;
2191}
2192
Blue Swirl8b9c99d2012-10-28 11:04:51 +00002193static void cpu_unregister_map_client(void *_client)
aliguoriba223c22009-01-22 16:59:16 +00002194{
2195 MapClient *client = (MapClient *)_client;
2196
Blue Swirl72cf2d42009-09-12 07:36:22 +00002197 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002198 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002199}
2200
2201static void cpu_notify_map_clients(void)
2202{
2203 MapClient *client;
2204
Blue Swirl72cf2d42009-09-12 07:36:22 +00002205 while (!QLIST_EMPTY(&map_client_list)) {
2206 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002207 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09002208 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00002209 }
2210}
2211
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002212bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2213{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002214 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002215 hwaddr l, xlat;
2216
2217 while (len > 0) {
2218 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002219 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2220 if (!memory_access_is_direct(mr, is_write)) {
2221 l = memory_access_size(mr, l, addr);
2222 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002223 return false;
2224 }
2225 }
2226
2227 len -= l;
2228 addr += l;
2229 }
2230 return true;
2231}
2232
aliguori6d16c2f2009-01-22 16:59:11 +00002233/* Map a physical memory region into a host virtual address.
2234 * May map a subset of the requested range, given by and returned in *plen.
2235 * May return NULL if resources needed to perform the mapping are exhausted.
2236 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002237 * Use cpu_register_map_client() to know when retrying the map operation is
2238 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002239 */
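/*
 * Illustrative sketch of the expected calling pattern (the variable names
 * here are hypothetical, not part of the API):
 *
 *     hwaddr mlen = len;
 *     void *p = address_space_map(as, gpa, &mlen, true);
 *     if (p) {
 *         memcpy(p, data, mlen);
 *         address_space_unmap(as, p, mlen, true, mlen);
 *     }
 */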
Avi Kivityac1970f2012-10-03 16:22:53 +02002240void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002241 hwaddr addr,
2242 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002243 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002244{
Avi Kivitya8170e52012-10-23 12:30:10 +02002245 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002246 hwaddr done = 0;
2247 hwaddr l, xlat, base;
2248 MemoryRegion *mr, *this_mr;
2249 ram_addr_t raddr;
aliguori6d16c2f2009-01-22 16:59:11 +00002250
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002251 if (len == 0) {
2252 return NULL;
2253 }
aliguori6d16c2f2009-01-22 16:59:11 +00002254
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002255 l = len;
2256 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2257 if (!memory_access_is_direct(mr, is_write)) {
2258 if (bounce.buffer) {
2259 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002260 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002261 /* Avoid unbounded allocations */
2262 l = MIN(l, TARGET_PAGE_SIZE);
2263 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002264 bounce.addr = addr;
2265 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002266
2267 memory_region_ref(mr);
2268 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002269 if (!is_write) {
2270 address_space_read(as, addr, bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002271 }
aliguori6d16c2f2009-01-22 16:59:11 +00002272
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002273 *plen = l;
2274 return bounce.buffer;
2275 }
2276
2277 base = xlat;
2278 raddr = memory_region_get_ram_addr(mr);
2279
2280 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00002281 len -= l;
2282 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002283 done += l;
2284 if (len == 0) {
2285 break;
2286 }
2287
2288 l = len;
2289 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2290 if (this_mr != mr || xlat != base + done) {
2291 break;
2292 }
aliguori6d16c2f2009-01-22 16:59:11 +00002293 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002294
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002295 memory_region_ref(mr);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002296 *plen = done;
2297 return qemu_ram_ptr_length(raddr + base, plen);
aliguori6d16c2f2009-01-22 16:59:11 +00002298}
2299
Avi Kivityac1970f2012-10-03 16:22:53 +02002300/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002301 * Will also mark the memory as dirty if is_write == 1. access_len gives
2302 * the amount of memory that was actually read or written by the caller.
2303 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002304void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2305 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002306{
2307 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002308 MemoryRegion *mr;
2309 ram_addr_t addr1;
2310
2311 mr = qemu_ram_addr_from_host(buffer, &addr1);
2312 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00002313 if (is_write) {
aliguori6d16c2f2009-01-22 16:59:11 +00002314 while (access_len) {
2315 unsigned l;
2316 l = TARGET_PAGE_SIZE;
2317 if (l > access_len)
2318 l = access_len;
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002319 invalidate_and_set_dirty(addr1, l);
aliguori6d16c2f2009-01-22 16:59:11 +00002320 addr1 += l;
2321 access_len -= l;
2322 }
2323 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002324 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002325 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002326 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002327 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00002328 return;
2329 }
2330 if (is_write) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002331 address_space_write(as, bounce.addr, bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002332 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002333 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002334 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002335 memory_region_unref(bounce.mr);
aliguoriba223c22009-01-22 16:59:16 +00002336 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002337}
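
/*
 * Usage sketch for the map/unmap pair above.  The mapping may cover
 * less than the requested length (check the value returned in *plen)
 * and may fail outright while the single bounce buffer is busy, in
 * which case cpu_register_map_client() can be used to retry later.
 * "as", "gpa" and "want" are hypothetical.
 *
 *     hwaddr plen = want;
 *     void *host = address_space_map(as, gpa, &plen, true);
 *     if (!host) {
 *         return;             // retry later via cpu_register_map_client()
 *     }
 *     memset(host, 0, plen);                     // touch at most *plen bytes
 *     address_space_unmap(as, host, plen, true, plen);
 */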
bellardd0ecd2a2006-04-23 17:14:48 +00002338
Avi Kivitya8170e52012-10-23 12:30:10 +02002339void *cpu_physical_memory_map(hwaddr addr,
2340 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002341 int is_write)
2342{
2343 return address_space_map(&address_space_memory, addr, plen, is_write);
2344}
2345
Avi Kivitya8170e52012-10-23 12:30:10 +02002346void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2347 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002348{
2349 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2350}
2351
bellard8df1cd02005-01-28 22:37:22 +00002352/* warning: addr must be aligned */
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002353static inline uint32_t ldl_phys_internal(AddressSpace *as, hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002354 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002355{
bellard8df1cd02005-01-28 22:37:22 +00002356 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002357 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002358 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002359 hwaddr l = 4;
2360 hwaddr addr1;
bellard8df1cd02005-01-28 22:37:22 +00002361
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002362 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002363 if (l < 4 || !memory_access_is_direct(mr, false)) {
bellard8df1cd02005-01-28 22:37:22 +00002364 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002365 io_mem_read(mr, addr1, &val, 4);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002366#if defined(TARGET_WORDS_BIGENDIAN)
2367 if (endian == DEVICE_LITTLE_ENDIAN) {
2368 val = bswap32(val);
2369 }
2370#else
2371 if (endian == DEVICE_BIG_ENDIAN) {
2372 val = bswap32(val);
2373 }
2374#endif
bellard8df1cd02005-01-28 22:37:22 +00002375 } else {
2376 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002377 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002378 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002379 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002380 switch (endian) {
2381 case DEVICE_LITTLE_ENDIAN:
2382 val = ldl_le_p(ptr);
2383 break;
2384 case DEVICE_BIG_ENDIAN:
2385 val = ldl_be_p(ptr);
2386 break;
2387 default:
2388 val = ldl_p(ptr);
2389 break;
2390 }
bellard8df1cd02005-01-28 22:37:22 +00002391 }
2392 return val;
2393}
2394
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002395uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002396{
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002397 return ldl_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002398}
2399
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002400uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002401{
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002402 return ldl_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002403}
2404
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002405uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002406{
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002407 return ldl_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002408}
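
/*
 * Usage sketch for the endian-specific loads above: a device model
 * reads guest data in the layout the guest wrote it, independent of
 * host and target byte order.  "desc_addr" is a hypothetical
 * guest-physical address of a little-endian descriptor.
 *
 *     uint32_t flags = ldl_le_phys(&address_space_memory, desc_addr);
 *     uint32_t count = ldl_le_phys(&address_space_memory, desc_addr + 4);
 */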
2409
bellard84b7b8e2005-11-28 21:19:04 +00002410/* warning: addr must be aligned */
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002411static inline uint64_t ldq_phys_internal(AddressSpace *as, hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002412 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00002413{
bellard84b7b8e2005-11-28 21:19:04 +00002414 uint8_t *ptr;
2415 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002416 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002417 hwaddr l = 8;
2418 hwaddr addr1;
bellard84b7b8e2005-11-28 21:19:04 +00002419
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002420 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002421 false);
2422 if (l < 8 || !memory_access_is_direct(mr, false)) {
bellard84b7b8e2005-11-28 21:19:04 +00002423 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002424 io_mem_read(mr, addr1, &val, 8);
Paolo Bonzini968a5622013-05-24 17:58:37 +02002425#if defined(TARGET_WORDS_BIGENDIAN)
2426 if (endian == DEVICE_LITTLE_ENDIAN) {
2427 val = bswap64(val);
2428 }
2429#else
2430 if (endian == DEVICE_BIG_ENDIAN) {
2431 val = bswap64(val);
2432 }
2433#endif
bellard84b7b8e2005-11-28 21:19:04 +00002434 } else {
2435 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002436 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002437 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002438 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002439 switch (endian) {
2440 case DEVICE_LITTLE_ENDIAN:
2441 val = ldq_le_p(ptr);
2442 break;
2443 case DEVICE_BIG_ENDIAN:
2444 val = ldq_be_p(ptr);
2445 break;
2446 default:
2447 val = ldq_p(ptr);
2448 break;
2449 }
bellard84b7b8e2005-11-28 21:19:04 +00002450 }
2451 return val;
2452}
2453
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002454uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002455{
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002456 return ldq_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002457}
2458
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002459uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002460{
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002461 return ldq_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002462}
2463
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002464uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002465{
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002466 return ldq_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002467}
2468
bellardaab33092005-10-30 20:48:42 +00002469/* XXX: optimize */
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002470uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
bellardaab33092005-10-30 20:48:42 +00002471{
2472 uint8_t val;
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002473 address_space_rw(as, addr, &val, 1, 0);
bellardaab33092005-10-30 20:48:42 +00002474 return val;
2475}
2476
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002477/* warning: addr must be aligned */
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10002478static inline uint32_t lduw_phys_internal(AddressSpace *as, hwaddr addr,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002479 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00002480{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002481 uint8_t *ptr;
2482 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002483 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002484 hwaddr l = 2;
2485 hwaddr addr1;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002486
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10002487 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002488 false);
2489 if (l < 2 || !memory_access_is_direct(mr, false)) {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002490 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002491 io_mem_read(mr, addr1, &val, 2);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002492#if defined(TARGET_WORDS_BIGENDIAN)
2493 if (endian == DEVICE_LITTLE_ENDIAN) {
2494 val = bswap16(val);
2495 }
2496#else
2497 if (endian == DEVICE_BIG_ENDIAN) {
2498 val = bswap16(val);
2499 }
2500#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002501 } else {
2502 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002503 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002504 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002505 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002506 switch (endian) {
2507 case DEVICE_LITTLE_ENDIAN:
2508 val = lduw_le_p(ptr);
2509 break;
2510 case DEVICE_BIG_ENDIAN:
2511 val = lduw_be_p(ptr);
2512 break;
2513 default:
2514 val = lduw_p(ptr);
2515 break;
2516 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002517 }
2518 return val;
bellardaab33092005-10-30 20:48:42 +00002519}
2520
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10002521uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002522{
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10002523 return lduw_phys_internal(as, addr, DEVICE_NATIVE_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002524}
2525
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10002526uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002527{
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10002528 return lduw_phys_internal(as, addr, DEVICE_LITTLE_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002529}
2530
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10002531uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002532{
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10002533 return lduw_phys_internal(as, addr, DEVICE_BIG_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002534}
2535
bellard8df1cd02005-01-28 22:37:22 +00002536/* warning: addr must be aligned. The ram page is not marked as dirty
2537 and the code inside is not invalidated. It is useful if the dirty
2538 bits are used to track modified PTEs */
Edgar E. Iglesias2198a122013-11-28 10:13:41 +01002539void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00002540{
bellard8df1cd02005-01-28 22:37:22 +00002541 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002542 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002543 hwaddr l = 4;
2544 hwaddr addr1;
bellard8df1cd02005-01-28 22:37:22 +00002545
Edgar E. Iglesias2198a122013-11-28 10:13:41 +01002546 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002547 true);
2548 if (l < 4 || !memory_access_is_direct(mr, true)) {
2549 io_mem_write(mr, addr1, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00002550 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002551 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00002552 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00002553 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00002554
2555 if (unlikely(in_migration)) {
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02002556 if (cpu_physical_memory_is_clean(addr1)) {
aliguori74576192008-10-06 14:02:03 +00002557 /* invalidate code */
2558 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2559 /* set dirty bit */
Juan Quintela52159192013-10-08 12:44:04 +02002560 cpu_physical_memory_set_dirty_flag(addr1,
2561 DIRTY_MEMORY_MIGRATION);
2562 cpu_physical_memory_set_dirty_flag(addr1, DIRTY_MEMORY_VGA);
aliguori74576192008-10-06 14:02:03 +00002563 }
2564 }
bellard8df1cd02005-01-28 22:37:22 +00002565 }
2566}
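
/*
 * Usage sketch: a target's page-table walker that needs to set the
 * accessed bit in a guest PTE can use the _notdirty variant so that,
 * as noted above, the store neither marks the page dirty nor
 * invalidates translated code in it.  "cpu", "pte_addr" and
 * PTE_ACCESSED are hypothetical.
 *
 *     uint32_t pte = ldl_phys(cpu->as, pte_addr);
 *     if (!(pte & PTE_ACCESSED)) {
 *         stl_phys_notdirty(cpu->as, pte_addr, pte | PTE_ACCESSED);
 *     }
 */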
2567
2568/* warning: addr must be aligned */
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10002569static inline void stl_phys_internal(AddressSpace *as,
2570 hwaddr addr, uint32_t val,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002571 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002572{
bellard8df1cd02005-01-28 22:37:22 +00002573 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002574 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002575 hwaddr l = 4;
2576 hwaddr addr1;
bellard8df1cd02005-01-28 22:37:22 +00002577
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10002578 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002579 true);
2580 if (l < 4 || !memory_access_is_direct(mr, true)) {
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002581#if defined(TARGET_WORDS_BIGENDIAN)
2582 if (endian == DEVICE_LITTLE_ENDIAN) {
2583 val = bswap32(val);
2584 }
2585#else
2586 if (endian == DEVICE_BIG_ENDIAN) {
2587 val = bswap32(val);
2588 }
2589#endif
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002590 io_mem_write(mr, addr1, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00002591 } else {
bellard8df1cd02005-01-28 22:37:22 +00002592 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002593 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00002594 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002595 switch (endian) {
2596 case DEVICE_LITTLE_ENDIAN:
2597 stl_le_p(ptr, val);
2598 break;
2599 case DEVICE_BIG_ENDIAN:
2600 stl_be_p(ptr, val);
2601 break;
2602 default:
2603 stl_p(ptr, val);
2604 break;
2605 }
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002606 invalidate_and_set_dirty(addr1, 4);
bellard8df1cd02005-01-28 22:37:22 +00002607 }
2608}
2609
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10002610void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002611{
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10002612 stl_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002613}
2614
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10002615void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002616{
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10002617 stl_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002618}
2619
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10002620void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002621{
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10002622 stl_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002623}
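
/*
 * Usage sketch for the endian-specific stores above, mirroring the
 * loads: a device model writing a little-endian status field back into
 * guest memory would pick the _le variant.  "status_addr" and
 * "STATUS_OK" are hypothetical.
 *
 *     stl_le_phys(&address_space_memory, status_addr, STATUS_OK);
 */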
2624
bellardaab33092005-10-30 20:48:42 +00002625/* XXX: optimize */
Edgar E. Iglesiasdb3be602013-12-17 15:29:06 +10002626void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00002627{
2628 uint8_t v = val;
Edgar E. Iglesiasdb3be602013-12-17 15:29:06 +10002629 address_space_rw(as, addr, &v, 1, 1);
bellardaab33092005-10-30 20:48:42 +00002630}
2631
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002632/* warning: addr must be aligned */
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10002633static inline void stw_phys_internal(AddressSpace *as,
2634 hwaddr addr, uint32_t val,
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002635 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00002636{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002637 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002638 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002639 hwaddr l = 2;
2640 hwaddr addr1;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002641
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10002642 mr = address_space_translate(as, addr, &addr1, &l, true);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002643 if (l < 2 || !memory_access_is_direct(mr, true)) {
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002644#if defined(TARGET_WORDS_BIGENDIAN)
2645 if (endian == DEVICE_LITTLE_ENDIAN) {
2646 val = bswap16(val);
2647 }
2648#else
2649 if (endian == DEVICE_BIG_ENDIAN) {
2650 val = bswap16(val);
2651 }
2652#endif
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002653 io_mem_write(mr, addr1, val, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002654 } else {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002655 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002656 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002657 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002658 switch (endian) {
2659 case DEVICE_LITTLE_ENDIAN:
2660 stw_le_p(ptr, val);
2661 break;
2662 case DEVICE_BIG_ENDIAN:
2663 stw_be_p(ptr, val);
2664 break;
2665 default:
2666 stw_p(ptr, val);
2667 break;
2668 }
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002669 invalidate_and_set_dirty(addr1, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002670 }
bellardaab33092005-10-30 20:48:42 +00002671}
2672
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10002673void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002674{
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10002675 stw_phys_internal(as, addr, val, DEVICE_NATIVE_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002676}
2677
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10002678void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002679{
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10002680 stw_phys_internal(as, addr, val, DEVICE_LITTLE_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002681}
2682
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10002683void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002684{
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10002685 stw_phys_internal(as, addr, val, DEVICE_BIG_ENDIAN);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002686}
2687
bellardaab33092005-10-30 20:48:42 +00002688/* XXX: optimize */
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01002689void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00002690{
2691 val = tswap64(val);
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01002692 address_space_rw(as, addr, (void *) &val, 8, 1);
bellardaab33092005-10-30 20:48:42 +00002693}
2694
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01002695void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002696{
2697 val = cpu_to_le64(val);
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01002698 address_space_rw(as, addr, (void *) &val, 8, 1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002699}
2700
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01002701void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002702{
2703 val = cpu_to_be64(val);
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01002704 address_space_rw(as, addr, (void *) &val, 8, 1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002705}
2706
aliguori5e2972f2009-03-28 17:51:36 +00002707/* virtual memory access for debug (includes writing to ROM) */
Andreas Färberf17ec442013-06-29 19:40:58 +02002708int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00002709 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002710{
2711 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02002712 hwaddr phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00002713 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00002714
2715 while (len > 0) {
2716 page = addr & TARGET_PAGE_MASK;
Andreas Färberf17ec442013-06-29 19:40:58 +02002717 phys_addr = cpu_get_phys_page_debug(cpu, page);
bellard13eb76e2004-01-24 15:23:36 +00002718 /* if no physical page mapped, return an error */
2719 if (phys_addr == -1)
2720 return -1;
2721 l = (page + TARGET_PAGE_SIZE) - addr;
2722 if (l > len)
2723 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00002724 phys_addr += (addr & ~TARGET_PAGE_MASK);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10002725 if (is_write) {
2726 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
2727 } else {
2728 address_space_rw(cpu->as, phys_addr, buf, l, 0);
2729 }
bellard13eb76e2004-01-24 15:23:36 +00002730 len -= l;
2731 buf += l;
2732 addr += l;
2733 }
2734 return 0;
2735}
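
/*
 * Usage sketch: this is the accessor a debugger front end such as the
 * gdbstub would use, because it takes a guest *virtual* address and
 * performs the per-page translation itself.  "cpu" and "guest_va" are
 * hypothetical.
 *
 *     uint8_t buf[16];
 *     if (cpu_memory_rw_debug(cpu, guest_va, buf, sizeof(buf), 0) < 0) {
 *         return;             // no physical page mapped at guest_va
 *     }
 */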
Paul Brooka68fe892010-03-01 00:08:59 +00002736#endif
bellard13eb76e2004-01-24 15:23:36 +00002737
Blue Swirl8e4a4242013-01-06 18:30:17 +00002738#if !defined(CONFIG_USER_ONLY)
2739
2740/*
2741 * A helper function for the _utterly broken_ virtio device model to find out if
2742 * it's running on a big endian machine. Don't do this at home kids!
2743 */
2744bool virtio_is_big_endian(void);
2745bool virtio_is_big_endian(void)
2746{
2747#if defined(TARGET_WORDS_BIGENDIAN)
2748 return true;
2749#else
2750 return false;
2751#endif
2752}
2753
2754#endif
2755
Wen Congyang76f35532012-05-07 12:04:18 +08002756#ifndef CONFIG_USER_ONLY
Avi Kivitya8170e52012-10-23 12:30:10 +02002757bool cpu_physical_memory_is_io(hwaddr phys_addr)
Wen Congyang76f35532012-05-07 12:04:18 +08002758{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002759 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002760 hwaddr l = 1;
Wen Congyang76f35532012-05-07 12:04:18 +08002761
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002762 mr = address_space_translate(&address_space_memory,
2763 phys_addr, &phys_addr, &l, false);
Wen Congyang76f35532012-05-07 12:04:18 +08002764
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002765 return !(memory_region_is_ram(mr) ||
2766 memory_region_is_romd(mr));
Wen Congyang76f35532012-05-07 12:04:18 +08002767}
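
/*
 * Usage sketch: callers that only want to touch RAM-backed guest
 * memory (a crash-dump writer, for instance) can use this predicate to
 * skip MMIO regions.  "paddr" is hypothetical.
 *
 *     if (cpu_physical_memory_is_io(paddr)) {
 *         return;             // leave device registers alone
 *     }
 *     uint32_t word = ldl_phys(&address_space_memory, paddr);
 */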
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04002768
2769void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
2770{
2771 RAMBlock *block;
2772
2773 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
2774 func(block->host, block->offset, block->length, opaque);
2775 }
2776}
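
/*
 * Usage sketch: the iterator calls the supplied function once per RAM
 * block with the host pointer, the block's offset in ram_addr_t space
 * and its length.  The summing callback below is hypothetical.
 *
 *     static void add_block_length(void *host, ram_addr_t offset,
 *                                  ram_addr_t length, void *opaque)
 *     {
 *         *(uint64_t *)opaque += length;
 *     }
 *
 *     uint64_t total_ram = 0;
 *     qemu_ram_foreach_block(add_block_length, &total_ram);
 */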
Peter Maydellec3f8c92013-06-27 20:53:38 +01002777#endif