/*
 *  Virtual page mapping
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

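/* Ensure that map->nodes has room for at least @nodes more nodes.  */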
static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

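/* Allocate the next node in map->nodes and initialize all of its entries,
 * either as unassigned leaves (@leaf) or as empty pointers to a lower level.
 * Returns the index of the new node.
 */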
static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

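/* Fill [*index, *index + *nb) target pages with @leaf at the given tree
 * level, allocating intermediate nodes on demand and recursing for ranges
 * that do not cover a whole entry at this level.
 */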
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

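/* Walk the radix tree for @addr and return the section that covers it,
 * or the unassigned section if no section does.
 */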
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}

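/* Return true if an access to @mr can be done directly on host memory
 * (RAM, or ROM devices for reads) rather than through MMIO dispatch.
 */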
static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(cpu->memory_dispatch,
                                               addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (VMStateSubsection[]) {
        {
            .vmsd = &vmstate_cpu_common_exception_index,
            .needed = cpu_common_exception_index_needed,
        } , {
            /* empty */
        }
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
{
    /* We only support one address space per cpu at the moment.  */
    assert(cpu->as == as);

    if (cpu->tcg_as_listener) {
        memory_listener_unregister(cpu->tcg_as_listener);
    } else {
        cpu->tcg_as_listener = g_new0(MemoryListener, 1);
    }
    cpu->tcg_as_listener->commit = tcg_commit;
    memory_listener_register(cpu->tcg_as_listener, as);
}
#endif

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&cpu->breakpoints);
    QTAILQ_INIT(&cpu->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
    cpu_reload_memory_map(cpu);
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)

{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        goto found;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *     rcu_read_lock()
     *     read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *     rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

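/* Re-arm dirty-memory write trapping in every vCPU TLB for the RAM range
 * [start, start + length); called after the corresponding bits in the
 * dirty bitmap have been cleared.
 */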
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    unsigned long end, page;
    bool dirty;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    dirty = bitmap_test_and_clear_atomic(ram_list.dirty_memory[client],
                                         page, end - page);

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - section->address_space->dispatch->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

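/* Register a section that is smaller than a target page, or not aligned to
 * one, by routing it through a subpage container for its base page.
 */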
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}

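/* Register a section that is target-page aligned and a whole number of
 * target pages long directly in the radix tree.
 */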
static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

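/* Called through the address space's dispatch_listener: split @section into
 * subpage pieces at unaligned edges and register the page-aligned remainder
 * as full pages in the next dispatch map.
 */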
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

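/* Return the block size of the filesystem backing @path (the huge page size
 * when the file is on hugetlbfs), or 0 with @errp set on failure.
 */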
static long gethugepagesize(const char *path, Error **errp)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        error_setg_errno(errp, errno, "failed to get page size of file %s",
                         path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path,
                            Error **errp)
{
    char *filename;
    char *sanitized_name;
    char *c;
    void *area = NULL;
    int fd;
    uint64_t hpagesize;
    Error *local_err = NULL;

    hpagesize = gethugepagesize(path, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error;
    }
    block->mr->align = hpagesize;

    if (memory < hpagesize) {
        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
                   "or larger than huge page size 0x%" PRIx64,
                   memory, hpagesize);
        goto error;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        error_setg(errp,
                   "host lacks kvm mmu notifiers, -mem-path unsupported");
        goto error;
    }

    /* Make name safe to use with mkstemp by replacing '/' with '_'. */
    sanitized_name = g_strdup(memory_region_name(block->mr));
    for (c = sanitized_name; *c != '\0'; c++) {
        if (*c == '/')
            *c = '_';
    }

    filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                               sanitized_name);
    g_free(sanitized_name);

    fd = mkstemp(filename);
    if (fd < 0) {
        error_setg_errno(errp, errno,
                         "unable to create backing store for hugepages");
        g_free(filename);
        goto error;
    }
    unlink(filename);
    g_free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory)) {
        perror("ftruncate");
    }

    area = mmap(0, memory, PROT_READ | PROT_WRITE,
                (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
                fd, 0);
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for hugepages");
        close(fd);
        goto error;
    }

    if (mem_prealloc) {
        os_mem_prealloc(fd, area, memory);
    }

    block->fd = fd;
    return area;

error:
    if (mem_prealloc) {
Gonglei81b07352015-02-25 12:22:31 +08001183 error_report("%s", error_get_pretty(*errp));
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001184 exit(1);
1185 }
1186 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001187}
1188#endif
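/* Usage note (hedged): this path is reached when the user asks for
 * file-backed guest RAM, for example
 *
 *     qemu-system-x86_64 -m 1024 -mem-path /dev/hugepages -mem-prealloc ...
 *
 * The mount point is only an example; -mem-prealloc is what sets the
 * mem_prealloc flag used above.
 */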
1189
Mike Day0dc3f442013-09-05 14:41:35 -04001190/* Called with the ramlist lock held. */
Alex Williamsond17b5282010-06-25 11:08:38 -06001191static ram_addr_t find_ram_offset(ram_addr_t size)
1192{
Alex Williamson04b16652010-07-02 11:13:17 -06001193 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001194 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001195
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001196 assert(size != 0); /* it would hand out same offset multiple times */
1197
Mike Day0dc3f442013-09-05 14:41:35 -04001198 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
Alex Williamson04b16652010-07-02 11:13:17 -06001199 return 0;
Mike Day0d53d9f2015-01-21 13:45:24 +01001200 }
Alex Williamson04b16652010-07-02 11:13:17 -06001201
Mike Day0dc3f442013-09-05 14:41:35 -04001202 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001203 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001204
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001205 end = block->offset + block->max_length;
Alex Williamson04b16652010-07-02 11:13:17 -06001206
Mike Day0dc3f442013-09-05 14:41:35 -04001207 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001208 if (next_block->offset >= end) {
1209 next = MIN(next, next_block->offset);
1210 }
1211 }
1212 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001213 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001214 mingap = next - end;
1215 }
1216 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001217
1218 if (offset == RAM_ADDR_MAX) {
1219 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1220 (uint64_t)size);
1221 abort();
1222 }
1223
Alex Williamson04b16652010-07-02 11:13:17 -06001224 return offset;
1225}
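/* Example (hypothetical layout): with blocks already at [0, 1 MiB) and
 * [3 MiB, 4 MiB), a request for 1 MiB sees a 2 MiB gap starting at 1 MiB and
 * the unbounded space after 4 MiB; the smallest gap that still fits wins
 * (best fit), so the new block lands at offset 1 MiB.
 */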
1226
Juan Quintela652d7ec2012-07-20 10:37:54 +02001227ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001228{
Alex Williamsond17b5282010-06-25 11:08:38 -06001229 RAMBlock *block;
1230 ram_addr_t last = 0;
1231
Mike Day0dc3f442013-09-05 14:41:35 -04001232 rcu_read_lock();
1233 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001234 last = MAX(last, block->offset + block->max_length);
Mike Day0d53d9f2015-01-21 13:45:24 +01001235 }
Mike Day0dc3f442013-09-05 14:41:35 -04001236 rcu_read_unlock();
Alex Williamsond17b5282010-06-25 11:08:38 -06001237 return last;
1238}
1239
Jason Baronddb97f12012-08-02 15:44:16 -04001240static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1241{
1242 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001243
1244 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
Marcel Apfelbaum47c8ca52015-02-04 17:43:54 +02001245 if (!machine_dump_guest_core(current_machine)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001246 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1247 if (ret) {
1248 perror("qemu_madvise");
1249 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1250 "but dump_guest_core=off specified\n");
1251 }
1252 }
1253}
1254
Mike Day0dc3f442013-09-05 14:41:35 -04001255/* Called within an RCU critical section, or while the ramlist lock
1256 * is held.
1257 */
Hu Tao20cfe882014-04-02 15:13:26 +08001258static RAMBlock *find_ram_block(ram_addr_t addr)
Cam Macdonell84b89d72010-07-26 18:10:57 -06001259{
Hu Tao20cfe882014-04-02 15:13:26 +08001260 RAMBlock *block;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001261
Mike Day0dc3f442013-09-05 14:41:35 -04001262 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001263 if (block->offset == addr) {
Hu Tao20cfe882014-04-02 15:13:26 +08001264 return block;
Avi Kivityc5705a72011-12-20 15:59:12 +02001265 }
1266 }
Hu Tao20cfe882014-04-02 15:13:26 +08001267
1268 return NULL;
1269}
1270
Mike Dayae3a7042013-09-05 14:41:35 -04001271/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001272void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1273{
Mike Dayae3a7042013-09-05 14:41:35 -04001274 RAMBlock *new_block, *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001275
Mike Day0dc3f442013-09-05 14:41:35 -04001276 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001277 new_block = find_ram_block(addr);
Avi Kivityc5705a72011-12-20 15:59:12 +02001278 assert(new_block);
1279 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001280
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001281 if (dev) {
1282 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001283 if (id) {
1284 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001285 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001286 }
1287 }
1288 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1289
Mike Day0dc3f442013-09-05 14:41:35 -04001290 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001291 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001292 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1293 new_block->idstr);
1294 abort();
1295 }
1296 }
Mike Day0dc3f442013-09-05 14:41:35 -04001297 rcu_read_unlock();
Avi Kivityc5705a72011-12-20 15:59:12 +02001298}
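/* Hedged usage sketch: callers normally reach this through
 * vmstate_register_ram() (elsewhere in the tree), roughly
 *
 *     qemu_ram_set_idstr(memory_region_get_ram_addr(mr),
 *                        memory_region_name(mr), dev);
 *
 * producing idstrs such as "0000:00:02.0/e1000.rom" that migration uses to
 * match RAM blocks between source and destination.
 */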
1299
Mike Dayae3a7042013-09-05 14:41:35 -04001300/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001301void qemu_ram_unset_idstr(ram_addr_t addr)
1302{
Mike Dayae3a7042013-09-05 14:41:35 -04001303 RAMBlock *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001304
Mike Dayae3a7042013-09-05 14:41:35 -04001305 /* FIXME: arch_init.c assumes that this is not called throughout
1306 * migration. Ignore the problem since hot-unplug during migration
1307 * does not work anyway.
1308 */
1309
Mike Day0dc3f442013-09-05 14:41:35 -04001310 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001311 block = find_ram_block(addr);
Hu Tao20cfe882014-04-02 15:13:26 +08001312 if (block) {
1313 memset(block->idstr, 0, sizeof(block->idstr));
1314 }
Mike Day0dc3f442013-09-05 14:41:35 -04001315 rcu_read_unlock();
Hu Tao20cfe882014-04-02 15:13:26 +08001316}
1317
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001318static int memory_try_enable_merging(void *addr, size_t len)
1319{
Marcel Apfelbaum75cc7f02015-02-04 17:43:55 +02001320 if (!machine_mem_merge(current_machine)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001321 /* disabled by the user */
1322 return 0;
1323 }
1324
1325 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1326}
1327
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001328/* Only legal before guest might have detected the memory size: e.g. on
1329 * incoming migration, or right after reset.
1330 *
 1331 * As the memory core doesn't know how memory is accessed, it is up to the
1332 * resize callback to update device state and/or add assertions to detect
1333 * misuse, if necessary.
1334 */
1335int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
1336{
1337 RAMBlock *block = find_ram_block(base);
1338
1339 assert(block);
1340
Michael S. Tsirkin129ddaf2015-02-17 10:15:30 +01001341 newsize = TARGET_PAGE_ALIGN(newsize);
1342
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001343 if (block->used_length == newsize) {
1344 return 0;
1345 }
1346
1347 if (!(block->flags & RAM_RESIZEABLE)) {
1348 error_setg_errno(errp, EINVAL,
1349 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1350 " in != 0x" RAM_ADDR_FMT, block->idstr,
1351 newsize, block->used_length);
1352 return -EINVAL;
1353 }
1354
1355 if (block->max_length < newsize) {
1356 error_setg_errno(errp, EINVAL,
1357 "Length too large: %s: 0x" RAM_ADDR_FMT
1358 " > 0x" RAM_ADDR_FMT, block->idstr,
1359 newsize, block->max_length);
1360 return -EINVAL;
1361 }
1362
1363 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1364 block->used_length = newsize;
Paolo Bonzini58d27072015-03-23 11:56:01 +01001365 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1366 DIRTY_CLIENTS_ALL);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001367 memory_region_set_size(block->mr, newsize);
1368 if (block->resized) {
1369 block->resized(block->idstr, newsize, block->host);
1370 }
1371 return 0;
1372}
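/* Minimal sketch of a caller (sizes hypothetical), e.g. when an incoming
 * migration stream carries a different used_length:
 *
 *     Error *err = NULL;
 *     if (qemu_ram_resize(block->offset, 128 * 1024 * 1024, &err) < 0) {
 *         error_report("%s", error_get_pretty(err));
 *     }
 */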
1373
Hu Taoef701d72014-09-09 13:27:54 +08001374static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
Avi Kivityc5705a72011-12-20 15:59:12 +02001375{
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001376 RAMBlock *block;
Mike Day0d53d9f2015-01-21 13:45:24 +01001377 RAMBlock *last_block = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001378 ram_addr_t old_ram_size, new_ram_size;
1379
1380 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001381
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001382 qemu_mutex_lock_ramlist();
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001383 new_block->offset = find_ram_offset(new_block->max_length);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001384
1385 if (!new_block->host) {
1386 if (xen_enabled()) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001387 xen_ram_alloc(new_block->offset, new_block->max_length,
1388 new_block->mr);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001389 } else {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001390 new_block->host = phys_mem_alloc(new_block->max_length,
Igor Mammedova2b257d2014-10-31 16:38:37 +00001391 &new_block->mr->align);
Markus Armbruster39228252013-07-31 15:11:11 +02001392 if (!new_block->host) {
Hu Taoef701d72014-09-09 13:27:54 +08001393 error_setg_errno(errp, errno,
1394 "cannot set up guest memory '%s'",
1395 memory_region_name(new_block->mr));
1396 qemu_mutex_unlock_ramlist();
1397 return -1;
Markus Armbruster39228252013-07-31 15:11:11 +02001398 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001399 memory_try_enable_merging(new_block->host, new_block->max_length);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001400 }
1401 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001402
Mike Day0d53d9f2015-01-21 13:45:24 +01001403 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1404 * QLIST (which has an RCU-friendly variant) does not have insertion at
1405 * tail, so save the last element in last_block.
1406 */
Mike Day0dc3f442013-09-05 14:41:35 -04001407 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Mike Day0d53d9f2015-01-21 13:45:24 +01001408 last_block = block;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001409 if (block->max_length < new_block->max_length) {
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001410 break;
1411 }
1412 }
1413 if (block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001414 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001415 } else if (last_block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001416 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001417 } else { /* list is empty */
Mike Day0dc3f442013-09-05 14:41:35 -04001418 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001419 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001420 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001421
Mike Day0dc3f442013-09-05 14:41:35 -04001422 /* Write list before version */
1423 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001424 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001425 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001426
Juan Quintela2152f5c2013-10-08 13:52:02 +02001427 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1428
1429 if (new_ram_size > old_ram_size) {
Juan Quintela1ab4c8c2013-10-08 16:14:39 +02001430 int i;
Mike Dayae3a7042013-09-05 14:41:35 -04001431
1432 /* ram_list.dirty_memory[] is protected by the iothread lock. */
Juan Quintela1ab4c8c2013-10-08 16:14:39 +02001433 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1434 ram_list.dirty_memory[i] =
1435 bitmap_zero_extend(ram_list.dirty_memory[i],
1436 old_ram_size, new_ram_size);
1437 }
Juan Quintela2152f5c2013-10-08 13:52:02 +02001438 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001439 cpu_physical_memory_set_dirty_range(new_block->offset,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001440 new_block->used_length,
1441 DIRTY_CLIENTS_ALL);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001442
Paolo Bonzinia904c912015-01-21 16:18:35 +01001443 if (new_block->host) {
1444 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1445 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1446 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1447 if (kvm_enabled()) {
1448 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1449 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001450 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001451
1452 return new_block->offset;
1453}
1454
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001455#ifdef __linux__
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001456ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001457 bool share, const char *mem_path,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001458 Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001459{
1460 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001461 ram_addr_t addr;
1462 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001463
1464 if (xen_enabled()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001465 error_setg(errp, "-mem-path not supported with Xen");
1466 return -1;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001467 }
1468
1469 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1470 /*
1471 * file_ram_alloc() needs to allocate just like
1472 * phys_mem_alloc, but we haven't bothered to provide
1473 * a hook there.
1474 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001475 error_setg(errp,
1476 "-mem-path not supported with this accelerator");
1477 return -1;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001478 }
1479
1480 size = TARGET_PAGE_ALIGN(size);
1481 new_block = g_malloc0(sizeof(*new_block));
1482 new_block->mr = mr;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001483 new_block->used_length = size;
1484 new_block->max_length = size;
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001485 new_block->flags = share ? RAM_SHARED : 0;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001486 new_block->host = file_ram_alloc(new_block, size,
1487 mem_path, errp);
1488 if (!new_block->host) {
1489 g_free(new_block);
1490 return -1;
1491 }
1492
Hu Taoef701d72014-09-09 13:27:54 +08001493 addr = ram_block_add(new_block, &local_err);
1494 if (local_err) {
1495 g_free(new_block);
1496 error_propagate(errp, local_err);
1497 return -1;
1498 }
1499 return addr;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001500}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001501#endif
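/* Hedged sketch of a direct caller (memory_region_init_ram_from_file() in
 * memory.c is the usual wrapper); the backing path is only an example:
 *
 *     Error *err = NULL;
 *     ram_addr_t addr = qemu_ram_alloc_from_file(size, mr, false,
 *                                                "/dev/hugepages", &err);
 *     if (err) {
 *         error_report("%s", error_get_pretty(err));
 *     }
 */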
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001502
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001503static
1504ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1505 void (*resized)(const char*,
1506 uint64_t length,
1507 void *host),
1508 void *host, bool resizeable,
Hu Taoef701d72014-09-09 13:27:54 +08001509 MemoryRegion *mr, Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001510{
1511 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001512 ram_addr_t addr;
1513 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001514
1515 size = TARGET_PAGE_ALIGN(size);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001516 max_size = TARGET_PAGE_ALIGN(max_size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001517 new_block = g_malloc0(sizeof(*new_block));
1518 new_block->mr = mr;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001519 new_block->resized = resized;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001520 new_block->used_length = size;
1521 new_block->max_length = max_size;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001522 assert(max_size >= size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001523 new_block->fd = -1;
1524 new_block->host = host;
1525 if (host) {
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001526 new_block->flags |= RAM_PREALLOC;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001527 }
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001528 if (resizeable) {
1529 new_block->flags |= RAM_RESIZEABLE;
1530 }
Hu Taoef701d72014-09-09 13:27:54 +08001531 addr = ram_block_add(new_block, &local_err);
1532 if (local_err) {
1533 g_free(new_block);
1534 error_propagate(errp, local_err);
1535 return -1;
1536 }
1537 return addr;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001538}
1539
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001540ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1541 MemoryRegion *mr, Error **errp)
1542{
1543 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1544}
1545
Hu Taoef701d72014-09-09 13:27:54 +08001546ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
pbrook94a6b542009-04-11 17:15:54 +00001547{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001548 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1549}
1550
1551ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
1552 void (*resized)(const char*,
1553 uint64_t length,
1554 void *host),
1555 MemoryRegion *mr, Error **errp)
1556{
1557 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
pbrook94a6b542009-04-11 17:15:54 +00001558}
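/* Minimal sketch of a resizeable allocation (names and sizes hypothetical):
 *
 *     static void my_resized(const char *id, uint64_t length, void *host)
 *     {
 *         // adjust device state for the new used_length
 *     }
 *     ...
 *     addr = qemu_ram_alloc_resizeable(16 * 1024 * 1024, 64 * 1024 * 1024,
 *                                      my_resized, mr, &err);
 *
 * Only used_length changes later via qemu_ram_resize(); max_length is fixed
 * and allocated up front by ram_block_add().
 */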
bellarde9a1ab12007-02-08 23:08:38 +00001559
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001560void qemu_ram_free_from_ptr(ram_addr_t addr)
1561{
1562 RAMBlock *block;
1563
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001564 qemu_mutex_lock_ramlist();
Mike Day0dc3f442013-09-05 14:41:35 -04001565 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001566 if (addr == block->offset) {
Mike Day0dc3f442013-09-05 14:41:35 -04001567 QLIST_REMOVE_RCU(block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001568 ram_list.mru_block = NULL;
Mike Day0dc3f442013-09-05 14:41:35 -04001569 /* Write list before version */
1570 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001571 ram_list.version++;
Paolo Bonzini43771532013-09-09 17:58:40 +02001572 g_free_rcu(block, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001573 break;
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001574 }
1575 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001576 qemu_mutex_unlock_ramlist();
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001577}
1578
Paolo Bonzini43771532013-09-09 17:58:40 +02001579static void reclaim_ramblock(RAMBlock *block)
1580{
1581 if (block->flags & RAM_PREALLOC) {
1582 ;
1583 } else if (xen_enabled()) {
1584 xen_invalidate_map_cache_entry(block->host);
1585#ifndef _WIN32
1586 } else if (block->fd >= 0) {
1587 munmap(block->host, block->max_length);
1588 close(block->fd);
1589#endif
1590 } else {
1591 qemu_anon_ram_free(block->host, block->max_length);
1592 }
1593 g_free(block);
1594}
1595
Anthony Liguoric227f092009-10-01 16:12:16 -05001596void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00001597{
Alex Williamson04b16652010-07-02 11:13:17 -06001598 RAMBlock *block;
1599
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001600 qemu_mutex_lock_ramlist();
Mike Day0dc3f442013-09-05 14:41:35 -04001601 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001602 if (addr == block->offset) {
Mike Day0dc3f442013-09-05 14:41:35 -04001603 QLIST_REMOVE_RCU(block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001604 ram_list.mru_block = NULL;
Mike Day0dc3f442013-09-05 14:41:35 -04001605 /* Write list before version */
1606 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001607 ram_list.version++;
Paolo Bonzini43771532013-09-09 17:58:40 +02001608 call_rcu(block, reclaim_ramblock, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001609 break;
Alex Williamson04b16652010-07-02 11:13:17 -06001610 }
1611 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001612 qemu_mutex_unlock_ramlist();
bellarde9a1ab12007-02-08 23:08:38 +00001613}
1614
Huang Yingcd19cfa2011-03-02 08:56:19 +01001615#ifndef _WIN32
1616void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1617{
1618 RAMBlock *block;
1619 ram_addr_t offset;
1620 int flags;
1621 void *area, *vaddr;
1622
Mike Day0dc3f442013-09-05 14:41:35 -04001623 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001624 offset = addr - block->offset;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001625 if (offset < block->max_length) {
Michael S. Tsirkin1240be22014-11-12 11:44:41 +02001626 vaddr = ramblock_ptr(block, offset);
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001627 if (block->flags & RAM_PREALLOC) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001628 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001629 } else if (xen_enabled()) {
1630 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001631 } else {
1632 flags = MAP_FIXED;
Markus Armbruster3435f392013-07-31 15:11:07 +02001633 if (block->fd >= 0) {
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001634 flags |= (block->flags & RAM_SHARED ?
1635 MAP_SHARED : MAP_PRIVATE);
Markus Armbruster3435f392013-07-31 15:11:07 +02001636 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1637 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001638 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001639 /*
1640 * Remap needs to match alloc. Accelerators that
1641 * set phys_mem_alloc never remap. If they did,
1642 * we'd need a remap hook here.
1643 */
1644 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1645
Huang Yingcd19cfa2011-03-02 08:56:19 +01001646 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1647 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1648 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001649 }
1650 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001651 fprintf(stderr, "Could not remap addr: "
1652 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001653 length, addr);
1654 exit(1);
1655 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001656 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001657 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001658 }
Huang Yingcd19cfa2011-03-02 08:56:19 +01001659 }
1660 }
1661}
1662#endif /* !_WIN32 */
1663
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001664int qemu_get_ram_fd(ram_addr_t addr)
1665{
Mike Dayae3a7042013-09-05 14:41:35 -04001666 RAMBlock *block;
1667 int fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001668
Mike Day0dc3f442013-09-05 14:41:35 -04001669 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001670 block = qemu_get_ram_block(addr);
1671 fd = block->fd;
Mike Day0dc3f442013-09-05 14:41:35 -04001672 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001673 return fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001674}
1675
Damjan Marion3fd74b82014-06-26 23:01:32 +02001676void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1677{
Mike Dayae3a7042013-09-05 14:41:35 -04001678 RAMBlock *block;
1679 void *ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001680
Mike Day0dc3f442013-09-05 14:41:35 -04001681 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001682 block = qemu_get_ram_block(addr);
1683 ptr = ramblock_ptr(block, 0);
Mike Day0dc3f442013-09-05 14:41:35 -04001684 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001685 return ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001686}
1687
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001688/* Return a host pointer to ram allocated with qemu_ram_alloc.
Mike Dayae3a7042013-09-05 14:41:35 -04001689 * This should not be used for general purpose DMA. Use address_space_map
1690 * or address_space_rw instead. For local memory (e.g. video ram) that the
1691 * device owns, use memory_region_get_ram_ptr.
Mike Day0dc3f442013-09-05 14:41:35 -04001692 *
1693 * By the time this function returns, the returned pointer is not protected
1694 * by RCU anymore. If the caller is not within an RCU critical section and
1695 * does not hold the iothread lock, it must have other means of protecting the
1696 * pointer, such as a reference to the region that includes the incoming
1697 * ram_addr_t.
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001698 */
1699void *qemu_get_ram_ptr(ram_addr_t addr)
1700{
Mike Dayae3a7042013-09-05 14:41:35 -04001701 RAMBlock *block;
1702 void *ptr;
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001703
Mike Day0dc3f442013-09-05 14:41:35 -04001704 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001705 block = qemu_get_ram_block(addr);
1706
1707 if (xen_enabled() && block->host == NULL) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001708 /* We need to check if the requested address is in the RAM
1709 * because we don't want to map the entire memory in QEMU.
1710 * In that case just map until the end of the page.
1711 */
1712 if (block->offset == 0) {
Mike Dayae3a7042013-09-05 14:41:35 -04001713 ptr = xen_map_cache(addr, 0, 0);
Mike Day0dc3f442013-09-05 14:41:35 -04001714 goto unlock;
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001715 }
Mike Dayae3a7042013-09-05 14:41:35 -04001716
1717 block->host = xen_map_cache(block->offset, block->max_length, 1);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001718 }
Mike Dayae3a7042013-09-05 14:41:35 -04001719 ptr = ramblock_ptr(block, addr - block->offset);
1720
Mike Day0dc3f442013-09-05 14:41:35 -04001721unlock:
1722 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001723 return ptr;
pbrookdc828ca2009-04-09 22:21:07 +00001724}
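/* Hedged sketch of one valid protection scheme per the comment above: keep
 * the pointer inside the caller's own RCU critical section for as long as it
 * is used:
 *
 *     rcu_read_lock();
 *     void *p = qemu_get_ram_ptr(addr);
 *     memcpy(p, buf, len);
 *     rcu_read_unlock();
 */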
1725
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001726/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
Mike Dayae3a7042013-09-05 14:41:35 -04001727 * but takes a size argument.
Mike Day0dc3f442013-09-05 14:41:35 -04001728 *
1729 * By the time this function returns, the returned pointer is not protected
1730 * by RCU anymore. If the caller is not within an RCU critical section and
1731 * does not hold the iothread lock, it must have other means of protecting the
1732 * pointer, such as a reference to the region that includes the incoming
1733 * ram_addr_t.
Mike Dayae3a7042013-09-05 14:41:35 -04001734 */
Peter Maydellcb85f7a2013-07-08 09:44:04 +01001735static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001736{
Mike Dayae3a7042013-09-05 14:41:35 -04001737 void *ptr;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001738 if (*size == 0) {
1739 return NULL;
1740 }
Jan Kiszka868bb332011-06-21 22:59:09 +02001741 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001742 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02001743 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001744 RAMBlock *block;
Mike Day0dc3f442013-09-05 14:41:35 -04001745 rcu_read_lock();
1746 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001747 if (addr - block->offset < block->max_length) {
1748 if (addr - block->offset + *size > block->max_length)
1749 *size = block->max_length - addr + block->offset;
Mike Dayae3a7042013-09-05 14:41:35 -04001750 ptr = ramblock_ptr(block, addr - block->offset);
Mike Day0dc3f442013-09-05 14:41:35 -04001751 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001752 return ptr;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001753 }
1754 }
1755
1756 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1757 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001758 }
1759}
1760
Paolo Bonzini7443b432013-06-03 12:44:02 +02001761/* Some of the softmmu routines need to translate from a host pointer
Mike Dayae3a7042013-09-05 14:41:35 -04001762 * (typically a TLB entry) back to a ram offset.
1763 *
1764 * By the time this function returns, the returned pointer is not protected
1765 * by RCU anymore. If the caller is not within an RCU critical section and
1766 * does not hold the iothread lock, it must have other means of protecting the
1767 * pointer, such as a reference to the region that includes the incoming
1768 * ram_addr_t.
1769 */
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001770MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00001771{
pbrook94a6b542009-04-11 17:15:54 +00001772 RAMBlock *block;
1773 uint8_t *host = ptr;
Mike Dayae3a7042013-09-05 14:41:35 -04001774 MemoryRegion *mr;
pbrook94a6b542009-04-11 17:15:54 +00001775
Jan Kiszka868bb332011-06-21 22:59:09 +02001776 if (xen_enabled()) {
Mike Day0dc3f442013-09-05 14:41:35 -04001777 rcu_read_lock();
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001778 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Mike Dayae3a7042013-09-05 14:41:35 -04001779 mr = qemu_get_ram_block(*ram_addr)->mr;
Mike Day0dc3f442013-09-05 14:41:35 -04001780 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001781 return mr;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001782 }
1783
Mike Day0dc3f442013-09-05 14:41:35 -04001784 rcu_read_lock();
1785 block = atomic_rcu_read(&ram_list.mru_block);
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001786 if (block && block->host && host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001787 goto found;
1788 }
1789
Mike Day0dc3f442013-09-05 14:41:35 -04001790 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001791 /* This case happens when the block is not mapped. */
1792 if (block->host == NULL) {
1793 continue;
1794 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001795 if (host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001796 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001797 }
pbrook94a6b542009-04-11 17:15:54 +00001798 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001799
Mike Day0dc3f442013-09-05 14:41:35 -04001800 rcu_read_unlock();
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001801 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001802
1803found:
1804 *ram_addr = block->offset + (host - block->host);
Mike Dayae3a7042013-09-05 14:41:35 -04001805 mr = block->mr;
Mike Day0dc3f442013-09-05 14:41:35 -04001806 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001807 return mr;
Marcelo Tosattie8902612010-10-11 15:31:19 -03001808}
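/* Typical use (sketch): turning a host pointer, typically taken from a TLB
 * entry, back into a guest ram_addr_t; host_ptr is illustrative:
 *
 *     ram_addr_t ram_addr;
 *     MemoryRegion *mr = qemu_ram_addr_from_host(host_ptr, &ram_addr);
 *     if (mr == NULL) {
 *         // host_ptr does not point into guest RAM
 *     }
 */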
Alex Williamsonf471a172010-06-11 11:11:42 -06001809
Avi Kivitya8170e52012-10-23 12:30:10 +02001810static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001811 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001812{
Juan Quintela52159192013-10-08 12:44:04 +02001813 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001814 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00001815 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001816 switch (size) {
1817 case 1:
1818 stb_p(qemu_get_ram_ptr(ram_addr), val);
1819 break;
1820 case 2:
1821 stw_p(qemu_get_ram_ptr(ram_addr), val);
1822 break;
1823 case 4:
1824 stl_p(qemu_get_ram_ptr(ram_addr), val);
1825 break;
1826 default:
1827 abort();
1828 }
Paolo Bonzini58d27072015-03-23 11:56:01 +01001829 /* Set both VGA and migration bits for simplicity and to remove
1830 * the notdirty callback faster.
1831 */
1832 cpu_physical_memory_set_dirty_range(ram_addr, size,
1833 DIRTY_CLIENTS_NOCODE);
bellardf23db162005-08-21 19:12:28 +00001834 /* we remove the notdirty callback only if the code has been
1835 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02001836 if (!cpu_physical_memory_is_clean(ram_addr)) {
Andreas Färber4917cf42013-05-27 05:17:50 +02001837 CPUArchState *env = current_cpu->env_ptr;
Andreas Färber93afead2013-08-26 03:41:01 +02001838 tlb_set_dirty(env, current_cpu->mem_io_vaddr);
Andreas Färber4917cf42013-05-27 05:17:50 +02001839 }
bellard1ccde1c2004-02-06 19:46:14 +00001840}
1841
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001842static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1843 unsigned size, bool is_write)
1844{
1845 return is_write;
1846}
1847
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001848static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001849 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001850 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001851 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00001852};
1853
pbrook0f459d12008-06-09 00:20:13 +00001854/* Generate a debug exception if a watchpoint has been hit. */
Peter Maydell66b9b432015-04-26 16:49:24 +01001855static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
pbrook0f459d12008-06-09 00:20:13 +00001856{
Andreas Färber93afead2013-08-26 03:41:01 +02001857 CPUState *cpu = current_cpu;
1858 CPUArchState *env = cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00001859 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00001860 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00001861 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00001862 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00001863
Andreas Färberff4700b2013-08-26 18:23:18 +02001864 if (cpu->watchpoint_hit) {
aliguori06d55cc2008-11-18 20:24:06 +00001865 /* We re-entered the check after replacing the TB. Now raise
 1866 * the debug interrupt so that it will trigger after the
1867 * current instruction. */
Andreas Färber93afead2013-08-26 03:41:01 +02001868 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00001869 return;
1870 }
Andreas Färber93afead2013-08-26 03:41:01 +02001871 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Andreas Färberff4700b2013-08-26 18:23:18 +02001872 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Peter Maydell05068c02014-09-12 14:06:48 +01001873 if (cpu_watchpoint_address_matches(wp, vaddr, len)
1874 && (wp->flags & flags)) {
Peter Maydell08225672014-09-12 14:06:48 +01001875 if (flags == BP_MEM_READ) {
1876 wp->flags |= BP_WATCHPOINT_HIT_READ;
1877 } else {
1878 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
1879 }
1880 wp->hitaddr = vaddr;
Peter Maydell66b9b432015-04-26 16:49:24 +01001881 wp->hitattrs = attrs;
Andreas Färberff4700b2013-08-26 18:23:18 +02001882 if (!cpu->watchpoint_hit) {
1883 cpu->watchpoint_hit = wp;
Andreas Färber239c51a2013-09-01 17:12:23 +02001884 tb_check_watchpoint(cpu);
aliguori6e140f22008-11-18 20:37:55 +00001885 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
Andreas Färber27103422013-08-26 08:31:06 +02001886 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +02001887 cpu_loop_exit(cpu);
aliguori6e140f22008-11-18 20:37:55 +00001888 } else {
1889 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
Andreas Färber648f0342013-09-01 17:43:17 +02001890 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
Andreas Färber0ea8cb82013-09-03 02:12:23 +02001891 cpu_resume_from_signal(cpu, NULL);
aliguori6e140f22008-11-18 20:37:55 +00001892 }
aliguori06d55cc2008-11-18 20:24:06 +00001893 }
aliguori6e140f22008-11-18 20:37:55 +00001894 } else {
1895 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00001896 }
1897 }
1898}
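/* Hedged sketch: a watchpoint that this check can hit is installed with
 * something like
 *
 *     cpu_watchpoint_insert(cpu, addr, 4, BP_MEM_WRITE | BP_GDB, NULL);
 *
 * after which accesses to that page are routed through the watch_mem_ops
 * handlers below.
 */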
1899
pbrook6658ffb2007-03-16 23:58:11 +00001900/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1901 so these check for a hit then pass through to the normal out-of-line
1902 phys routines. */
Peter Maydell66b9b432015-04-26 16:49:24 +01001903static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
1904 unsigned size, MemTxAttrs attrs)
pbrook6658ffb2007-03-16 23:58:11 +00001905{
Peter Maydell66b9b432015-04-26 16:49:24 +01001906 MemTxResult res;
1907 uint64_t data;
pbrook6658ffb2007-03-16 23:58:11 +00001908
Peter Maydell66b9b432015-04-26 16:49:24 +01001909 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
Avi Kivity1ec9b902012-01-02 12:47:48 +02001910 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04001911 case 1:
Peter Maydell66b9b432015-04-26 16:49:24 +01001912 data = address_space_ldub(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04001913 break;
1914 case 2:
Peter Maydell66b9b432015-04-26 16:49:24 +01001915 data = address_space_lduw(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04001916 break;
1917 case 4:
Peter Maydell66b9b432015-04-26 16:49:24 +01001918 data = address_space_ldl(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04001919 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02001920 default: abort();
1921 }
Peter Maydell66b9b432015-04-26 16:49:24 +01001922 *pdata = data;
1923 return res;
1924}
1925
1926static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
1927 uint64_t val, unsigned size,
1928 MemTxAttrs attrs)
1929{
1930 MemTxResult res;
1931
1932 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
1933 switch (size) {
1934 case 1:
1935 address_space_stb(&address_space_memory, addr, val, attrs, &res);
1936 break;
1937 case 2:
1938 address_space_stw(&address_space_memory, addr, val, attrs, &res);
1939 break;
1940 case 4:
1941 address_space_stl(&address_space_memory, addr, val, attrs, &res);
1942 break;
1943 default: abort();
1944 }
1945 return res;
pbrook6658ffb2007-03-16 23:58:11 +00001946}
1947
Avi Kivity1ec9b902012-01-02 12:47:48 +02001948static const MemoryRegionOps watch_mem_ops = {
Peter Maydell66b9b432015-04-26 16:49:24 +01001949 .read_with_attrs = watch_mem_read,
1950 .write_with_attrs = watch_mem_write,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001951 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00001952};
pbrook6658ffb2007-03-16 23:58:11 +00001953
Peter Maydellf25a49e2015-04-26 16:49:24 +01001954static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
1955 unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00001956{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001957 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01001958 uint8_t buf[8];
Peter Maydell5c9eb022015-04-26 16:49:24 +01001959 MemTxResult res;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001960
blueswir1db7b5422007-05-26 17:36:03 +00001961#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001962 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001963 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00001964#endif
Peter Maydell5c9eb022015-04-26 16:49:24 +01001965 res = address_space_read(subpage->as, addr + subpage->base,
1966 attrs, buf, len);
1967 if (res) {
1968 return res;
Peter Maydellf25a49e2015-04-26 16:49:24 +01001969 }
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001970 switch (len) {
1971 case 1:
Peter Maydellf25a49e2015-04-26 16:49:24 +01001972 *data = ldub_p(buf);
1973 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001974 case 2:
Peter Maydellf25a49e2015-04-26 16:49:24 +01001975 *data = lduw_p(buf);
1976 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001977 case 4:
Peter Maydellf25a49e2015-04-26 16:49:24 +01001978 *data = ldl_p(buf);
1979 return MEMTX_OK;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01001980 case 8:
Peter Maydellf25a49e2015-04-26 16:49:24 +01001981 *data = ldq_p(buf);
1982 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001983 default:
1984 abort();
1985 }
blueswir1db7b5422007-05-26 17:36:03 +00001986}
1987
Peter Maydellf25a49e2015-04-26 16:49:24 +01001988static MemTxResult subpage_write(void *opaque, hwaddr addr,
1989 uint64_t value, unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00001990{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001991 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01001992 uint8_t buf[8];
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001993
blueswir1db7b5422007-05-26 17:36:03 +00001994#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001995 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001996 " value %"PRIx64"\n",
1997 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00001998#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001999 switch (len) {
2000 case 1:
2001 stb_p(buf, value);
2002 break;
2003 case 2:
2004 stw_p(buf, value);
2005 break;
2006 case 4:
2007 stl_p(buf, value);
2008 break;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002009 case 8:
2010 stq_p(buf, value);
2011 break;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002012 default:
2013 abort();
2014 }
Peter Maydell5c9eb022015-04-26 16:49:24 +01002015 return address_space_write(subpage->as, addr + subpage->base,
2016 attrs, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00002017}
2018
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002019static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08002020 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002021{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002022 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002023#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002024 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002025 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002026#endif
2027
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002028 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08002029 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002030}
2031
Avi Kivity70c68e42012-01-02 12:32:48 +02002032static const MemoryRegionOps subpage_ops = {
Peter Maydellf25a49e2015-04-26 16:49:24 +01002033 .read_with_attrs = subpage_read,
2034 .write_with_attrs = subpage_write,
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002035 .impl.min_access_size = 1,
2036 .impl.max_access_size = 8,
2037 .valid.min_access_size = 1,
2038 .valid.max_access_size = 8,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002039 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02002040 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00002041};
2042
Anthony Liguoric227f092009-10-01 16:12:16 -05002043static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002044 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00002045{
2046 int idx, eidx;
2047
2048 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2049 return -1;
2050 idx = SUBPAGE_IDX(start);
2051 eidx = SUBPAGE_IDX(end);
2052#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002053 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2054 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00002055#endif
blueswir1db7b5422007-05-26 17:36:03 +00002056 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02002057 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00002058 }
2059
2060 return 0;
2061}
2062
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002063static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00002064{
Anthony Liguoric227f092009-10-01 16:12:16 -05002065 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00002066
Anthony Liguori7267c092011-08-20 22:09:37 -05002067 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00002068
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002069 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00002070 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002071 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002072 NULL, TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02002073 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00002074#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002075 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2076 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00002077#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02002078 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00002079
2080 return mmio;
2081}
2082
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002083static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2084 MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02002085{
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002086 assert(as);
Avi Kivity5312bd82012-02-12 18:32:55 +02002087 MemoryRegionSection section = {
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002088 .address_space = as,
Avi Kivity5312bd82012-02-12 18:32:55 +02002089 .mr = mr,
2090 .offset_within_address_space = 0,
2091 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002092 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02002093 };
2094
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002095 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02002096}
2097
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002098MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02002099{
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002100 AddressSpaceDispatch *d = atomic_rcu_read(&cpu->memory_dispatch);
2101 MemoryRegionSection *sections = d->map.sections;
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002102
2103 return sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02002104}
2105
Avi Kivitye9179ce2009-06-14 11:38:52 +03002106static void io_mem_init(void)
2107{
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002108 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002109 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002110 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002111 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002112 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002113 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002114 NULL, UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03002115}
2116
Avi Kivityac1970f2012-10-03 16:22:53 +02002117static void mem_begin(MemoryListener *listener)
2118{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002119 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002120 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2121 uint16_t n;
2122
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002123 n = dummy_section(&d->map, as, &io_mem_unassigned);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002124 assert(n == PHYS_SECTION_UNASSIGNED);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002125 n = dummy_section(&d->map, as, &io_mem_notdirty);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002126 assert(n == PHYS_SECTION_NOTDIRTY);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002127 n = dummy_section(&d->map, as, &io_mem_rom);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002128 assert(n == PHYS_SECTION_ROM);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002129 n = dummy_section(&d->map, as, &io_mem_watch);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002130 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02002131
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02002132 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02002133 d->as = as;
2134 as->next_dispatch = d;
2135}
2136
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002137static void address_space_dispatch_free(AddressSpaceDispatch *d)
2138{
2139 phys_sections_free(&d->map);
2140 g_free(d);
2141}
2142
Paolo Bonzini00752702013-05-29 12:13:54 +02002143static void mem_commit(MemoryListener *listener)
2144{
2145 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02002146 AddressSpaceDispatch *cur = as->dispatch;
2147 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02002148
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002149 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02002150
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002151 atomic_rcu_set(&as->dispatch, next);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002152 if (cur) {
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002153 call_rcu(cur, address_space_dispatch_free, rcu);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002154 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02002155}
2156
Avi Kivity1d711482012-10-02 18:54:45 +02002157static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02002158{
Andreas Färber182735e2013-05-29 22:29:20 +02002159 CPUState *cpu;
Avi Kivity117712c2012-02-12 21:23:17 +02002160
2161 /* since each CPU stores ram addresses in its TLB cache, we must
2162 reset the modified entries */
2163 /* XXX: slow ! */
Andreas Färberbdc44642013-06-24 23:50:24 +02002164 CPU_FOREACH(cpu) {
Edgar E. Iglesias33bde2e2013-11-21 19:06:30 +01002165 /* FIXME: Disentangle the cpu.h circular files deps so we can
2166 directly get the right CPU from listener. */
2167 if (cpu->tcg_as_listener != listener) {
2168 continue;
2169 }
Paolo Bonzini76e5c762015-01-15 12:46:47 +01002170 cpu_reload_memory_map(cpu);
Avi Kivity117712c2012-02-12 21:23:17 +02002171 }
Avi Kivity50c1e142012-02-08 21:36:02 +02002172}
2173
Avi Kivityac1970f2012-10-03 16:22:53 +02002174void address_space_init_dispatch(AddressSpace *as)
2175{
Paolo Bonzini00752702013-05-29 12:13:54 +02002176 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002177 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002178 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02002179 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02002180 .region_add = mem_add,
2181 .region_nop = mem_add,
2182 .priority = 0,
2183 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002184 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02002185}
2186
Paolo Bonzini6e48e8f2015-02-10 10:25:44 -07002187void address_space_unregister(AddressSpace *as)
2188{
2189 memory_listener_unregister(&as->dispatch_listener);
2190}
2191
Avi Kivity83f3c252012-10-07 12:59:55 +02002192void address_space_destroy_dispatch(AddressSpace *as)
2193{
2194 AddressSpaceDispatch *d = as->dispatch;
2195
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002196 atomic_rcu_set(&as->dispatch, NULL);
2197 if (d) {
2198 call_rcu(d, address_space_dispatch_free, rcu);
2199 }
Avi Kivity83f3c252012-10-07 12:59:55 +02002200}
2201
Avi Kivity62152b82011-07-26 14:26:14 +03002202static void memory_map_init(void)
2203{
Anthony Liguori7267c092011-08-20 22:09:37 -05002204 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01002205
Paolo Bonzini57271d62013-11-07 17:14:37 +01002206 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002207 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03002208
Anthony Liguori7267c092011-08-20 22:09:37 -05002209 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02002210 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2211 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002212 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity62152b82011-07-26 14:26:14 +03002213}
2214
2215MemoryRegion *get_system_memory(void)
2216{
2217 return system_memory;
2218}
2219
Avi Kivity309cb472011-08-08 16:09:03 +03002220MemoryRegion *get_system_io(void)
2221{
2222 return system_io;
2223}
2224
pbrooke2eef172008-06-08 01:09:01 +00002225#endif /* !defined(CONFIG_USER_ONLY) */
2226
bellard13eb76e2004-01-24 15:23:36 +00002227/* physical memory access (slow version, mainly for debug) */
2228#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02002229int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00002230 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002231{
2232 int l, flags;
2233 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00002234 void * p;
bellard13eb76e2004-01-24 15:23:36 +00002235
2236 while (len > 0) {
2237 page = addr & TARGET_PAGE_MASK;
2238 l = (page + TARGET_PAGE_SIZE) - addr;
2239 if (l > len)
2240 l = len;
2241 flags = page_get_flags(page);
2242 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00002243 return -1;
bellard13eb76e2004-01-24 15:23:36 +00002244 if (is_write) {
2245 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00002246 return -1;
bellard579a97f2007-11-11 14:26:47 +00002247 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002248 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00002249 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002250 memcpy(p, buf, l);
2251 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00002252 } else {
2253 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00002254 return -1;
bellard579a97f2007-11-11 14:26:47 +00002255 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002256 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00002257 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002258 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00002259 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00002260 }
2261 len -= l;
2262 buf += l;
2263 addr += l;
2264 }
Paul Brooka68fe892010-03-01 00:08:59 +00002265 return 0;
bellard13eb76e2004-01-24 15:23:36 +00002266}
bellard8df1cd02005-01-28 22:37:22 +00002267
bellard13eb76e2004-01-24 15:23:36 +00002268#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002269
Paolo Bonzini845b6212015-03-23 11:45:53 +01002270static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002271 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002272{
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002273 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2274 /* No early return if dirty_log_mask is or becomes 0, because
2275 * cpu_physical_memory_set_dirty_range will still call
2276 * xen_modified_memory.
2277 */
2278 if (dirty_log_mask) {
2279 dirty_log_mask =
2280 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002281 }
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002282 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2283 tb_invalidate_phys_range(addr, addr + length);
2284 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2285 }
2286 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002287}
2288
Richard Henderson23326162013-07-08 14:55:59 -07002289static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02002290{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02002291 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07002292
2293 /* Regions are assumed to support 1-4 byte accesses unless
2294 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07002295 if (access_size_max == 0) {
2296 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002297 }
Richard Henderson23326162013-07-08 14:55:59 -07002298
2299 /* Bound the maximum access by the alignment of the address. */
2300 if (!mr->ops->impl.unaligned) {
2301 unsigned align_size_max = addr & -addr;
2302 if (align_size_max != 0 && align_size_max < access_size_max) {
2303 access_size_max = align_size_max;
2304 }
2305 }
2306
2307 /* Don't attempt accesses larger than the maximum. */
2308 if (l > access_size_max) {
2309 l = access_size_max;
2310 }
Paolo Bonzini098178f2013-07-29 14:27:39 +02002311 if (l & (l - 1)) {
2312 l = 1 << (qemu_fls(l) - 1);
2313 }
Richard Henderson23326162013-07-08 14:55:59 -07002314
2315 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002316}
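/* Worked example (illustrative): for a region whose ops declare
 * valid.max_access_size == 0, the default of 4 bytes applies.  With a
 * region that does not allow unaligned accesses, a request of l == 8 at
 * addr == 0x1006 is clipped as follows:
 *
 *     access_size_max = 4;                       default, max_access_size == 0
 *     align_size_max  = 0x1006 & -0x1006 = 2;    lowest set bit of the address
 *     access_size_max = 2;                       bounded by the alignment
 *     l = 2;                                     8 > 2, and 2 is a power of two
 *
 * address_space_rw() then issues a 2-byte dispatch and loops for the rest.
 */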
2317
Peter Maydell5c9eb022015-04-26 16:49:24 +01002318MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2319 uint8_t *buf, int len, bool is_write)
bellard13eb76e2004-01-24 15:23:36 +00002320{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002321 hwaddr l;
bellard13eb76e2004-01-24 15:23:36 +00002322 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002323 uint64_t val;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002324 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002325 MemoryRegion *mr;
Peter Maydell3b643492015-04-26 16:49:23 +01002326 MemTxResult result = MEMTX_OK;
ths3b46e622007-09-17 08:09:54 +00002327
Paolo Bonzini41063e12015-03-18 14:21:43 +01002328 rcu_read_lock();
bellard13eb76e2004-01-24 15:23:36 +00002329 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002330 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002331 mr = address_space_translate(as, addr, &addr1, &l, is_write);
ths3b46e622007-09-17 08:09:54 +00002332
bellard13eb76e2004-01-24 15:23:36 +00002333 if (is_write) {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002334 if (!memory_access_is_direct(mr, is_write)) {
2335 l = memory_access_size(mr, l, addr1);
Andreas Färber4917cf42013-05-27 05:17:50 +02002336 /* XXX: could force current_cpu to NULL to avoid
bellard6a00d602005-11-21 23:25:50 +00002337 potential bugs */
Richard Henderson23326162013-07-08 14:55:59 -07002338 switch (l) {
2339 case 8:
2340 /* 64 bit write access */
2341 val = ldq_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002342 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2343 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002344 break;
2345 case 4:
bellard1c213d12005-09-03 10:49:04 +00002346 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002347 val = ldl_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002348 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2349 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002350 break;
2351 case 2:
bellard1c213d12005-09-03 10:49:04 +00002352 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002353 val = lduw_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002354 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2355 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002356 break;
2357 case 1:
bellard1c213d12005-09-03 10:49:04 +00002358 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002359 val = ldub_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002360 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2361 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002362 break;
2363 default:
2364 abort();
bellard13eb76e2004-01-24 15:23:36 +00002365 }
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02002366 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002367 addr1 += memory_region_get_ram_addr(mr);
bellard13eb76e2004-01-24 15:23:36 +00002368 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002369 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00002370 memcpy(ptr, buf, l);
Paolo Bonzini845b6212015-03-23 11:45:53 +01002371 invalidate_and_set_dirty(mr, addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002372 }
2373 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002374 if (!memory_access_is_direct(mr, is_write)) {
bellard13eb76e2004-01-24 15:23:36 +00002375 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002376 l = memory_access_size(mr, l, addr1);
Richard Henderson23326162013-07-08 14:55:59 -07002377 switch (l) {
2378 case 8:
2379 /* 64 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002380 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2381 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002382 stq_p(buf, val);
2383 break;
2384 case 4:
bellard13eb76e2004-01-24 15:23:36 +00002385 /* 32 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002386 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2387 attrs);
bellardc27004e2005-01-03 23:35:10 +00002388 stl_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002389 break;
2390 case 2:
bellard13eb76e2004-01-24 15:23:36 +00002391 /* 16 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002392 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2393 attrs);
bellardc27004e2005-01-03 23:35:10 +00002394 stw_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002395 break;
2396 case 1:
bellard1c213d12005-09-03 10:49:04 +00002397 /* 8 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002398 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2399 attrs);
bellardc27004e2005-01-03 23:35:10 +00002400 stb_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002401 break;
2402 default:
2403 abort();
bellard13eb76e2004-01-24 15:23:36 +00002404 }
2405 } else {
2406 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002407 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
Avi Kivityf3705d52012-03-08 16:16:34 +02002408 memcpy(buf, ptr, l);
bellard13eb76e2004-01-24 15:23:36 +00002409 }
2410 }
2411 len -= l;
2412 buf += l;
2413 addr += l;
2414 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002415 rcu_read_unlock();
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002416
Peter Maydell3b643492015-04-26 16:49:23 +01002417 return result;
bellard13eb76e2004-01-24 15:23:36 +00002418}
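/* Usage sketch (illustrative, not part of the original file): a caller that
 * wants explicit memory attributes and error reporting can drive this path
 * directly.  The address and the error handling below are made up.
 *
 *     uint8_t data[4] = { 0x12, 0x34, 0x56, 0x78 };
 *     MemTxResult res;
 *
 *     res = address_space_rw(&address_space_memory, 0x1000,
 *                            MEMTXATTRS_UNSPECIFIED, data, sizeof(data),
 *                            true);
 *     if (res != MEMTX_OK) {
 *         ... the write failed (at least partially) on the device side ...
 *     }
 *
 * address_space_write()/address_space_read() below are thin wrappers around
 * the same function.
 */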
bellard8df1cd02005-01-28 22:37:22 +00002419
Peter Maydell5c9eb022015-04-26 16:49:24 +01002420MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2421 const uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002422{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002423 return address_space_rw(as, addr, attrs, (uint8_t *)buf, len, true);
Avi Kivityac1970f2012-10-03 16:22:53 +02002424}
2425
Peter Maydell5c9eb022015-04-26 16:49:24 +01002426MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2427 uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002428{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002429 return address_space_rw(as, addr, attrs, buf, len, false);
Avi Kivityac1970f2012-10-03 16:22:53 +02002430}
2431
2432
Avi Kivitya8170e52012-10-23 12:30:10 +02002433void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002434 int len, int is_write)
2435{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002436 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2437 buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002438}
2439
Alexander Graf582b55a2013-12-11 14:17:44 +01002440enum write_rom_type {
2441 WRITE_DATA,
2442 FLUSH_CACHE,
2443};
2444
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002445static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
Alexander Graf582b55a2013-12-11 14:17:44 +01002446 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
bellardd0ecd2a2006-04-23 17:14:48 +00002447{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002448 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002449 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002450 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002451 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002452
Paolo Bonzini41063e12015-03-18 14:21:43 +01002453 rcu_read_lock();
bellardd0ecd2a2006-04-23 17:14:48 +00002454 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002455 l = len;
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002456 mr = address_space_translate(as, addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002457
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002458 if (!(memory_region_is_ram(mr) ||
2459 memory_region_is_romd(mr))) {
bellardd0ecd2a2006-04-23 17:14:48 +00002460 /* do nothing */
2461 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002462 addr1 += memory_region_get_ram_addr(mr);
bellardd0ecd2a2006-04-23 17:14:48 +00002463 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002464 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf582b55a2013-12-11 14:17:44 +01002465 switch (type) {
2466 case WRITE_DATA:
2467 memcpy(ptr, buf, l);
Paolo Bonzini845b6212015-03-23 11:45:53 +01002468 invalidate_and_set_dirty(mr, addr1, l);
Alexander Graf582b55a2013-12-11 14:17:44 +01002469 break;
2470 case FLUSH_CACHE:
2471 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2472 break;
2473 }
bellardd0ecd2a2006-04-23 17:14:48 +00002474 }
2475 len -= l;
2476 buf += l;
2477 addr += l;
2478 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002479 rcu_read_unlock();
bellardd0ecd2a2006-04-23 17:14:48 +00002480}
2481
Alexander Graf582b55a2013-12-11 14:17:44 +01002482/* used for ROM loading: can write to both RAM and ROM */
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002483void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
Alexander Graf582b55a2013-12-11 14:17:44 +01002484 const uint8_t *buf, int len)
2485{
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002486 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
Alexander Graf582b55a2013-12-11 14:17:44 +01002487}
2488
2489void cpu_flush_icache_range(hwaddr start, int len)
2490{
2491 /*
2492 * This function should do the same thing as an icache flush that was
2493 * triggered from within the guest. For TCG we are always cache coherent,
2494 * so there is no need to flush anything. For KVM / Xen we need to
2495 * flush at least the host's instruction cache.
2496 */
2497 if (tcg_enabled()) {
2498 return;
2499 }
2500
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002501 cpu_physical_memory_write_rom_internal(&address_space_memory,
2502 start, NULL, len, FLUSH_CACHE);
Alexander Graf582b55a2013-12-11 14:17:44 +01002503}
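/* Illustrative pairing (not from the original file): a loader that patches
 * executable code at a physical address typically writes it with
 * cpu_physical_memory_write_rom() and then flushes the host icache over the
 * same range, which is a no-op under TCG as explained above.  The buffer,
 * address and length names are placeholders.
 *
 *     cpu_physical_memory_write_rom(&address_space_memory, entry_pa,
 *                                   code_buf, code_len);
 *     cpu_flush_icache_range(entry_pa, code_len);
 */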
2504
aliguori6d16c2f2009-01-22 16:59:11 +00002505typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002506 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002507 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002508 hwaddr addr;
2509 hwaddr len;
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002510 bool in_use;
aliguori6d16c2f2009-01-22 16:59:11 +00002511} BounceBuffer;
2512
2513static BounceBuffer bounce;
2514
aliguoriba223c22009-01-22 16:59:16 +00002515typedef struct MapClient {
Fam Zhenge95205e2015-03-16 17:03:37 +08002516 QEMUBH *bh;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002517 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002518} MapClient;
2519
Fam Zheng38e047b2015-03-16 17:03:35 +08002520QemuMutex map_client_list_lock;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002521static QLIST_HEAD(map_client_list, MapClient) map_client_list
2522 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002523
Fam Zhenge95205e2015-03-16 17:03:37 +08002524static void cpu_unregister_map_client_do(MapClient *client)
aliguoriba223c22009-01-22 16:59:16 +00002525{
Blue Swirl72cf2d42009-09-12 07:36:22 +00002526 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002527 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002528}
2529
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002530static void cpu_notify_map_clients_locked(void)
aliguoriba223c22009-01-22 16:59:16 +00002531{
2532 MapClient *client;
2533
Blue Swirl72cf2d42009-09-12 07:36:22 +00002534 while (!QLIST_EMPTY(&map_client_list)) {
2535 client = QLIST_FIRST(&map_client_list);
Fam Zhenge95205e2015-03-16 17:03:37 +08002536 qemu_bh_schedule(client->bh);
2537 cpu_unregister_map_client_do(client);
aliguoriba223c22009-01-22 16:59:16 +00002538 }
2539}
2540
Fam Zhenge95205e2015-03-16 17:03:37 +08002541void cpu_register_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002542{
2543 MapClient *client = g_malloc(sizeof(*client));
2544
Fam Zheng38e047b2015-03-16 17:03:35 +08002545 qemu_mutex_lock(&map_client_list_lock);
Fam Zhenge95205e2015-03-16 17:03:37 +08002546 client->bh = bh;
bellardd0ecd2a2006-04-23 17:14:48 +00002547 QLIST_INSERT_HEAD(&map_client_list, client, link);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002548 if (!atomic_read(&bounce.in_use)) {
2549 cpu_notify_map_clients_locked();
2550 }
Fam Zheng38e047b2015-03-16 17:03:35 +08002551 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002552}
2553
Fam Zheng38e047b2015-03-16 17:03:35 +08002554void cpu_exec_init_all(void)
2555{
2556 qemu_mutex_init(&ram_list.mutex);
2557 memory_map_init();
2558 io_mem_init();
2559 qemu_mutex_init(&map_client_list_lock);
2560}
2561
Fam Zhenge95205e2015-03-16 17:03:37 +08002562void cpu_unregister_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002563{
Fam Zhenge95205e2015-03-16 17:03:37 +08002564 MapClient *client;
bellardd0ecd2a2006-04-23 17:14:48 +00002565
Fam Zhenge95205e2015-03-16 17:03:37 +08002566 qemu_mutex_lock(&map_client_list_lock);
2567 QLIST_FOREACH(client, &map_client_list, link) {
2568 if (client->bh == bh) {
2569 cpu_unregister_map_client_do(client);
2570 break;
2571 }
2572 }
2573 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002574}
2575
2576static void cpu_notify_map_clients(void)
2577{
Fam Zheng38e047b2015-03-16 17:03:35 +08002578 qemu_mutex_lock(&map_client_list_lock);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002579 cpu_notify_map_clients_locked();
Fam Zheng38e047b2015-03-16 17:03:35 +08002580 qemu_mutex_unlock(&map_client_list_lock);
aliguori6d16c2f2009-01-22 16:59:11 +00002581}
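/* Illustrative sketch of the retry protocol (not part of the original file):
 * when address_space_map() fails because the single bounce buffer is busy,
 * a caller can register a bottom half that is scheduled as soon as the
 * buffer is released.  The callback, its state type and the opaque pointer
 * are made up.
 *
 *     static void my_dma_retry_bh(void *opaque)
 *     {
 *         MyDMAState *s = opaque;
 *         ... call address_space_map() again and continue the transfer ...
 *     }
 *
 *     void *p = address_space_map(as, addr, &len, is_write);
 *     if (!p) {
 *         s->bh = qemu_bh_new(my_dma_retry_bh, s);
 *         cpu_register_map_client(s->bh);
 *         return;    // the transfer resumes from the bottom half
 *     }
 */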
2582
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002583bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2584{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002585 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002586 hwaddr l, xlat;
2587
Paolo Bonzini41063e12015-03-18 14:21:43 +01002588 rcu_read_lock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002589 while (len > 0) {
2590 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002591 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2592 if (!memory_access_is_direct(mr, is_write)) {
2593 l = memory_access_size(mr, l, addr);
2594 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002595 return false;
2596 }
2597 }
2598
2599 len -= l;
2600 addr += l;
2601 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002602 rcu_read_unlock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002603 return true;
2604}
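/* Usage sketch (illustrative): device emulation can probe whether a DMA
 * window is fully backed by RAM, or by I/O regions that accept the access,
 * before committing to it.  The descriptor type and address are
 * placeholders.
 *
 *     if (!address_space_access_valid(&address_space_memory, desc_pa,
 *                                     sizeof(struct MyDesc), false)) {
 *         ... flag a DMA error instead of reading the descriptor ...
 *     }
 */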
2605
aliguori6d16c2f2009-01-22 16:59:11 +00002606/* Map a physical memory region into a host virtual address.
2607 * May map a subset of the requested range, given by and returned in *plen.
2608 * May return NULL if resources needed to perform the mapping are exhausted.
2609 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002610 * Use cpu_register_map_client() to know when retrying the map operation is
2611 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002612 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002613void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002614 hwaddr addr,
2615 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002616 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002617{
Avi Kivitya8170e52012-10-23 12:30:10 +02002618 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002619 hwaddr done = 0;
2620 hwaddr l, xlat, base;
2621 MemoryRegion *mr, *this_mr;
2622 ram_addr_t raddr;
aliguori6d16c2f2009-01-22 16:59:11 +00002623
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002624 if (len == 0) {
2625 return NULL;
2626 }
aliguori6d16c2f2009-01-22 16:59:11 +00002627
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002628 l = len;
Paolo Bonzini41063e12015-03-18 14:21:43 +01002629 rcu_read_lock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002630 mr = address_space_translate(as, addr, &xlat, &l, is_write);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002631
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002632 if (!memory_access_is_direct(mr, is_write)) {
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002633 if (atomic_xchg(&bounce.in_use, true)) {
Paolo Bonzini41063e12015-03-18 14:21:43 +01002634 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002635 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002636 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002637 /* Avoid unbounded allocations */
2638 l = MIN(l, TARGET_PAGE_SIZE);
2639 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002640 bounce.addr = addr;
2641 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002642
2643 memory_region_ref(mr);
2644 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002645 if (!is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002646 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2647 bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002648 }
aliguori6d16c2f2009-01-22 16:59:11 +00002649
Paolo Bonzini41063e12015-03-18 14:21:43 +01002650 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002651 *plen = l;
2652 return bounce.buffer;
2653 }
2654
2655 base = xlat;
2656 raddr = memory_region_get_ram_addr(mr);
2657
2658 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00002659 len -= l;
2660 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002661 done += l;
2662 if (len == 0) {
2663 break;
2664 }
2665
2666 l = len;
2667 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2668 if (this_mr != mr || xlat != base + done) {
2669 break;
2670 }
aliguori6d16c2f2009-01-22 16:59:11 +00002671 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002672
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002673 memory_region_ref(mr);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002674 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002675 *plen = done;
2676 return qemu_ram_ptr_length(raddr + base, plen);
aliguori6d16c2f2009-01-22 16:59:11 +00002677}
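/* Usage sketch (illustrative, not part of the original file): the usual
 * map/access/unmap sequence for zero-copy access to guest memory.  Note
 * that *plen may come back smaller than requested, so real callers loop.
 * The names gpa/src/size are placeholders; address_space_unmap() is defined
 * just below.
 *
 *     hwaddr xlen = size;
 *     void *p = address_space_map(as, gpa, &xlen, true);
 *     if (p) {
 *         memcpy(p, src, xlen);                    // at most xlen bytes
 *         address_space_unmap(as, p, xlen, true, xlen);
 *     }
 */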
2678
Avi Kivityac1970f2012-10-03 16:22:53 +02002679/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002680 * Will also mark the memory as dirty if is_write == 1. access_len gives
2681 * the amount of memory that was actually read or written by the caller.
2682 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002683void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2684 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002685{
2686 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002687 MemoryRegion *mr;
2688 ram_addr_t addr1;
2689
2690 mr = qemu_ram_addr_from_host(buffer, &addr1);
2691 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00002692 if (is_write) {
Paolo Bonzini845b6212015-03-23 11:45:53 +01002693 invalidate_and_set_dirty(mr, addr1, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002694 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002695 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002696 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002697 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002698 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00002699 return;
2700 }
2701 if (is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002702 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
2703 bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002704 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002705 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002706 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002707 memory_region_unref(bounce.mr);
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002708 atomic_mb_set(&bounce.in_use, false);
aliguoriba223c22009-01-22 16:59:16 +00002709 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002710}
bellardd0ecd2a2006-04-23 17:14:48 +00002711
Avi Kivitya8170e52012-10-23 12:30:10 +02002712void *cpu_physical_memory_map(hwaddr addr,
2713 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002714 int is_write)
2715{
2716 return address_space_map(&address_space_memory, addr, plen, is_write);
2717}
2718
Avi Kivitya8170e52012-10-23 12:30:10 +02002719void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2720 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002721{
2722 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2723}
2724
bellard8df1cd02005-01-28 22:37:22 +00002725/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01002726static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
2727 MemTxAttrs attrs,
2728 MemTxResult *result,
2729 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002730{
bellard8df1cd02005-01-28 22:37:22 +00002731 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002732 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002733 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002734 hwaddr l = 4;
2735 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01002736 MemTxResult r;
bellard8df1cd02005-01-28 22:37:22 +00002737
Paolo Bonzini41063e12015-03-18 14:21:43 +01002738 rcu_read_lock();
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002739 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002740 if (l < 4 || !memory_access_is_direct(mr, false)) {
bellard8df1cd02005-01-28 22:37:22 +00002741 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01002742 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002743#if defined(TARGET_WORDS_BIGENDIAN)
2744 if (endian == DEVICE_LITTLE_ENDIAN) {
2745 val = bswap32(val);
2746 }
2747#else
2748 if (endian == DEVICE_BIG_ENDIAN) {
2749 val = bswap32(val);
2750 }
2751#endif
bellard8df1cd02005-01-28 22:37:22 +00002752 } else {
2753 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002754 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002755 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002756 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002757 switch (endian) {
2758 case DEVICE_LITTLE_ENDIAN:
2759 val = ldl_le_p(ptr);
2760 break;
2761 case DEVICE_BIG_ENDIAN:
2762 val = ldl_be_p(ptr);
2763 break;
2764 default:
2765 val = ldl_p(ptr);
2766 break;
2767 }
Peter Maydell50013112015-04-26 16:49:24 +01002768 r = MEMTX_OK;
2769 }
2770 if (result) {
2771 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00002772 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002773 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00002774 return val;
2775}
2776
Peter Maydell50013112015-04-26 16:49:24 +01002777uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
2778 MemTxAttrs attrs, MemTxResult *result)
2779{
2780 return address_space_ldl_internal(as, addr, attrs, result,
2781 DEVICE_NATIVE_ENDIAN);
2782}
2783
2784uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
2785 MemTxAttrs attrs, MemTxResult *result)
2786{
2787 return address_space_ldl_internal(as, addr, attrs, result,
2788 DEVICE_LITTLE_ENDIAN);
2789}
2790
2791uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
2792 MemTxAttrs attrs, MemTxResult *result)
2793{
2794 return address_space_ldl_internal(as, addr, attrs, result,
2795 DEVICE_BIG_ENDIAN);
2796}
2797
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002798uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002799{
Peter Maydell50013112015-04-26 16:49:24 +01002800 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002801}
2802
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002803uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002804{
Peter Maydell50013112015-04-26 16:49:24 +01002805 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002806}
2807
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002808uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002809{
Peter Maydell50013112015-04-26 16:49:24 +01002810 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002811}
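/* Usage sketch (illustrative): the fixed-endian load helpers are convenient
 * for device registers and in-memory structures with a defined byte order,
 * independent of the target's native endianness.  The address and the
 * failure handling are placeholders.
 *
 *     MemTxResult res;
 *     uint32_t v = address_space_ldl_le(&address_space_memory, ring_pa,
 *                                       MEMTXATTRS_UNSPECIFIED, &res);
 *     if (res != MEMTX_OK) {
 *         ... treat the descriptor read as failed ...
 *     }
 *
 * ldl_le_phys() above is the same call with attributes and result ignored.
 */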
2812
bellard84b7b8e2005-11-28 21:19:04 +00002813/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01002814static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
2815 MemTxAttrs attrs,
2816 MemTxResult *result,
2817 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00002818{
bellard84b7b8e2005-11-28 21:19:04 +00002819 uint8_t *ptr;
2820 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002821 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002822 hwaddr l = 8;
2823 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01002824 MemTxResult r;
bellard84b7b8e2005-11-28 21:19:04 +00002825
Paolo Bonzini41063e12015-03-18 14:21:43 +01002826 rcu_read_lock();
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002827 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002828 false);
2829 if (l < 8 || !memory_access_is_direct(mr, false)) {
bellard84b7b8e2005-11-28 21:19:04 +00002830 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01002831 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
Paolo Bonzini968a5622013-05-24 17:58:37 +02002832#if defined(TARGET_WORDS_BIGENDIAN)
2833 if (endian == DEVICE_LITTLE_ENDIAN) {
2834 val = bswap64(val);
2835 }
2836#else
2837 if (endian == DEVICE_BIG_ENDIAN) {
2838 val = bswap64(val);
2839 }
2840#endif
bellard84b7b8e2005-11-28 21:19:04 +00002841 } else {
2842 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002843 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002844 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002845 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002846 switch (endian) {
2847 case DEVICE_LITTLE_ENDIAN:
2848 val = ldq_le_p(ptr);
2849 break;
2850 case DEVICE_BIG_ENDIAN:
2851 val = ldq_be_p(ptr);
2852 break;
2853 default:
2854 val = ldq_p(ptr);
2855 break;
2856 }
Peter Maydell50013112015-04-26 16:49:24 +01002857 r = MEMTX_OK;
2858 }
2859 if (result) {
2860 *result = r;
bellard84b7b8e2005-11-28 21:19:04 +00002861 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002862 rcu_read_unlock();
bellard84b7b8e2005-11-28 21:19:04 +00002863 return val;
2864}
2865
Peter Maydell50013112015-04-26 16:49:24 +01002866uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
2867 MemTxAttrs attrs, MemTxResult *result)
2868{
2869 return address_space_ldq_internal(as, addr, attrs, result,
2870 DEVICE_NATIVE_ENDIAN);
2871}
2872
2873uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
2874 MemTxAttrs attrs, MemTxResult *result)
2875{
2876 return address_space_ldq_internal(as, addr, attrs, result,
2877 DEVICE_LITTLE_ENDIAN);
2878}
2879
2880uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
2881 MemTxAttrs attrs, MemTxResult *result)
2882{
2883 return address_space_ldq_internal(as, addr, attrs, result,
2884 DEVICE_BIG_ENDIAN);
2885}
2886
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002887uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002888{
Peter Maydell50013112015-04-26 16:49:24 +01002889 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002890}
2891
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002892uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002893{
Peter Maydell50013112015-04-26 16:49:24 +01002894 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002895}
2896
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002897uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002898{
Peter Maydell50013112015-04-26 16:49:24 +01002899 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002900}
2901
bellardaab33092005-10-30 20:48:42 +00002902/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01002903uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
2904 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00002905{
2906 uint8_t val;
Peter Maydell50013112015-04-26 16:49:24 +01002907 MemTxResult r;
2908
2909 r = address_space_rw(as, addr, attrs, &val, 1, 0);
2910 if (result) {
2911 *result = r;
2912 }
bellardaab33092005-10-30 20:48:42 +00002913 return val;
2914}
2915
Peter Maydell50013112015-04-26 16:49:24 +01002916uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
2917{
2918 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
2919}
2920
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002921/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01002922static inline uint32_t address_space_lduw_internal(AddressSpace *as,
2923 hwaddr addr,
2924 MemTxAttrs attrs,
2925 MemTxResult *result,
2926 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00002927{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002928 uint8_t *ptr;
2929 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002930 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002931 hwaddr l = 2;
2932 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01002933 MemTxResult r;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002934
Paolo Bonzini41063e12015-03-18 14:21:43 +01002935 rcu_read_lock();
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10002936 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002937 false);
2938 if (l < 2 || !memory_access_is_direct(mr, false)) {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002939 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01002940 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002941#if defined(TARGET_WORDS_BIGENDIAN)
2942 if (endian == DEVICE_LITTLE_ENDIAN) {
2943 val = bswap16(val);
2944 }
2945#else
2946 if (endian == DEVICE_BIG_ENDIAN) {
2947 val = bswap16(val);
2948 }
2949#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002950 } else {
2951 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002952 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002953 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002954 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002955 switch (endian) {
2956 case DEVICE_LITTLE_ENDIAN:
2957 val = lduw_le_p(ptr);
2958 break;
2959 case DEVICE_BIG_ENDIAN:
2960 val = lduw_be_p(ptr);
2961 break;
2962 default:
2963 val = lduw_p(ptr);
2964 break;
2965 }
Peter Maydell50013112015-04-26 16:49:24 +01002966 r = MEMTX_OK;
2967 }
2968 if (result) {
2969 *result = r;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002970 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002971 rcu_read_unlock();
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002972 return val;
bellardaab33092005-10-30 20:48:42 +00002973}
2974
Peter Maydell50013112015-04-26 16:49:24 +01002975uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
2976 MemTxAttrs attrs, MemTxResult *result)
2977{
2978 return address_space_lduw_internal(as, addr, attrs, result,
2979 DEVICE_NATIVE_ENDIAN);
2980}
2981
2982uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
2983 MemTxAttrs attrs, MemTxResult *result)
2984{
2985 return address_space_lduw_internal(as, addr, attrs, result,
2986 DEVICE_LITTLE_ENDIAN);
2987}
2988
2989uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
2990 MemTxAttrs attrs, MemTxResult *result)
2991{
2992 return address_space_lduw_internal(as, addr, attrs, result,
2993 DEVICE_BIG_ENDIAN);
2994}
2995
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10002996uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002997{
Peter Maydell50013112015-04-26 16:49:24 +01002998 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002999}
3000
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003001uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003002{
Peter Maydell50013112015-04-26 16:49:24 +01003003 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003004}
3005
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003006uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003007{
Peter Maydell50013112015-04-26 16:49:24 +01003008 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003009}
3010
bellard8df1cd02005-01-28 22:37:22 +00003011/* warning: addr must be aligned. The RAM page is not marked as dirty
3012 and the code inside is not invalidated. This is useful if the dirty
3013 bits are used to track modified PTEs. */
Peter Maydell50013112015-04-26 16:49:24 +01003014void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3015 MemTxAttrs attrs, MemTxResult *result)
bellard8df1cd02005-01-28 22:37:22 +00003016{
bellard8df1cd02005-01-28 22:37:22 +00003017 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003018 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003019 hwaddr l = 4;
3020 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003021 MemTxResult r;
Paolo Bonzini845b6212015-03-23 11:45:53 +01003022 uint8_t dirty_log_mask;
bellard8df1cd02005-01-28 22:37:22 +00003023
Paolo Bonzini41063e12015-03-18 14:21:43 +01003024 rcu_read_lock();
Edgar E. Iglesias2198a122013-11-28 10:13:41 +01003025 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003026 true);
3027 if (l < 4 || !memory_access_is_direct(mr, true)) {
Peter Maydell50013112015-04-26 16:49:24 +01003028 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003029 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003030 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00003031 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00003032 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003033
Paolo Bonzini845b6212015-03-23 11:45:53 +01003034 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3035 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
Paolo Bonzini58d27072015-03-23 11:56:01 +01003036 cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
Peter Maydell50013112015-04-26 16:49:24 +01003037 r = MEMTX_OK;
3038 }
3039 if (result) {
3040 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003041 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003042 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003043}
3044
Peter Maydell50013112015-04-26 16:49:24 +01003045void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3046{
3047 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3048}
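/* Illustrative note (not from the original file): the _notdirty variant is
 * meant for stores the guest program is not directly aware of, such as a
 * softmmu helper setting accessed/dirty bits in a page table entry.
 * Because DIRTY_MEMORY_CODE is not set, such updates do not invalidate any
 * translated code covering the page.  A hypothetical caller:
 *
 *     stl_phys_notdirty(cs->as, pte_pa, pte | PG_ACCESSED_MASK);
 *
 * where pte_pa, pte and PG_ACCESSED_MASK stand in for the target MMU's own
 * definitions.
 */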
3049
bellard8df1cd02005-01-28 22:37:22 +00003050/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003051static inline void address_space_stl_internal(AddressSpace *as,
3052 hwaddr addr, uint32_t val,
3053 MemTxAttrs attrs,
3054 MemTxResult *result,
3055 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003056{
bellard8df1cd02005-01-28 22:37:22 +00003057 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003058 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003059 hwaddr l = 4;
3060 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003061 MemTxResult r;
bellard8df1cd02005-01-28 22:37:22 +00003062
Paolo Bonzini41063e12015-03-18 14:21:43 +01003063 rcu_read_lock();
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003064 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003065 true);
3066 if (l < 4 || !memory_access_is_direct(mr, true)) {
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003067#if defined(TARGET_WORDS_BIGENDIAN)
3068 if (endian == DEVICE_LITTLE_ENDIAN) {
3069 val = bswap32(val);
3070 }
3071#else
3072 if (endian == DEVICE_BIG_ENDIAN) {
3073 val = bswap32(val);
3074 }
3075#endif
Peter Maydell50013112015-04-26 16:49:24 +01003076 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003077 } else {
bellard8df1cd02005-01-28 22:37:22 +00003078 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003079 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00003080 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003081 switch (endian) {
3082 case DEVICE_LITTLE_ENDIAN:
3083 stl_le_p(ptr, val);
3084 break;
3085 case DEVICE_BIG_ENDIAN:
3086 stl_be_p(ptr, val);
3087 break;
3088 default:
3089 stl_p(ptr, val);
3090 break;
3091 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003092 invalidate_and_set_dirty(mr, addr1, 4);
Peter Maydell50013112015-04-26 16:49:24 +01003093 r = MEMTX_OK;
bellard8df1cd02005-01-28 22:37:22 +00003094 }
Peter Maydell50013112015-04-26 16:49:24 +01003095 if (result) {
3096 *result = r;
3097 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003098 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003099}
3100
3101void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3102 MemTxAttrs attrs, MemTxResult *result)
3103{
3104 address_space_stl_internal(as, addr, val, attrs, result,
3105 DEVICE_NATIVE_ENDIAN);
3106}
3107
3108void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3109 MemTxAttrs attrs, MemTxResult *result)
3110{
3111 address_space_stl_internal(as, addr, val, attrs, result,
3112 DEVICE_LITTLE_ENDIAN);
3113}
3114
3115void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3116 MemTxAttrs attrs, MemTxResult *result)
3117{
3118 address_space_stl_internal(as, addr, val, attrs, result,
3119 DEVICE_BIG_ENDIAN);
bellard8df1cd02005-01-28 22:37:22 +00003120}
3121
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003122void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003123{
Peter Maydell50013112015-04-26 16:49:24 +01003124 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003125}
3126
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003127void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003128{
Peter Maydell50013112015-04-26 16:49:24 +01003129 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003130}
3131
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003132void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003133{
Peter Maydell50013112015-04-26 16:49:24 +01003134 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003135}
3136
bellardaab33092005-10-30 20:48:42 +00003137/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003138void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3139 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003140{
3141 uint8_t v = val;
Peter Maydell50013112015-04-26 16:49:24 +01003142 MemTxResult r;
3143
3144 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3145 if (result) {
3146 *result = r;
3147 }
3148}
3149
3150void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3151{
3152 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003153}
3154
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003155/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003156static inline void address_space_stw_internal(AddressSpace *as,
3157 hwaddr addr, uint32_t val,
3158 MemTxAttrs attrs,
3159 MemTxResult *result,
3160 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003161{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003162 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003163 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003164 hwaddr l = 2;
3165 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003166 MemTxResult r;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003167
Paolo Bonzini41063e12015-03-18 14:21:43 +01003168 rcu_read_lock();
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003169 mr = address_space_translate(as, addr, &addr1, &l, true);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003170 if (l < 2 || !memory_access_is_direct(mr, true)) {
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003171#if defined(TARGET_WORDS_BIGENDIAN)
3172 if (endian == DEVICE_LITTLE_ENDIAN) {
3173 val = bswap16(val);
3174 }
3175#else
3176 if (endian == DEVICE_BIG_ENDIAN) {
3177 val = bswap16(val);
3178 }
3179#endif
Peter Maydell50013112015-04-26 16:49:24 +01003180 r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003181 } else {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003182 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003183 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003184 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003185 switch (endian) {
3186 case DEVICE_LITTLE_ENDIAN:
3187 stw_le_p(ptr, val);
3188 break;
3189 case DEVICE_BIG_ENDIAN:
3190 stw_be_p(ptr, val);
3191 break;
3192 default:
3193 stw_p(ptr, val);
3194 break;
3195 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003196 invalidate_and_set_dirty(mr, addr1, 2);
Peter Maydell50013112015-04-26 16:49:24 +01003197 r = MEMTX_OK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003198 }
Peter Maydell50013112015-04-26 16:49:24 +01003199 if (result) {
3200 *result = r;
3201 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003202 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003203}
3204
3205void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
3206 MemTxAttrs attrs, MemTxResult *result)
3207{
3208 address_space_stw_internal(as, addr, val, attrs, result,
3209 DEVICE_NATIVE_ENDIAN);
3210}
3211
3212void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
3213 MemTxAttrs attrs, MemTxResult *result)
3214{
3215 address_space_stw_internal(as, addr, val, attrs, result,
3216 DEVICE_LITTLE_ENDIAN);
3217}
3218
3219void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
3220 MemTxAttrs attrs, MemTxResult *result)
3221{
3222 address_space_stw_internal(as, addr, val, attrs, result,
3223 DEVICE_BIG_ENDIAN);
bellardaab33092005-10-30 20:48:42 +00003224}
3225
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003226void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003227{
Peter Maydell50013112015-04-26 16:49:24 +01003228 address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003229}
3230
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003231void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003232{
Peter Maydell50013112015-04-26 16:49:24 +01003233 address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003234}
3235
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003236void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003237{
Peter Maydell50013112015-04-26 16:49:24 +01003238 address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003239}
3240
bellardaab33092005-10-30 20:48:42 +00003241/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003242void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
3243 MemTxAttrs attrs, MemTxResult *result)
3244{
3245 MemTxResult r;
3246 val = tswap64(val);
3247 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3248 if (result) {
3249 *result = r;
3250 }
3251}
3252
3253void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
3254 MemTxAttrs attrs, MemTxResult *result)
3255{
3256 MemTxResult r;
3257 val = cpu_to_le64(val);
3258 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3259 if (result) {
3260 *result = r;
3261 }
3262}
3263void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
3264 MemTxAttrs attrs, MemTxResult *result)
3265{
3266 MemTxResult r;
3267 val = cpu_to_be64(val);
3268 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3269 if (result) {
3270 *result = r;
3271 }
3272}
3273
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003274void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00003275{
Peter Maydell50013112015-04-26 16:49:24 +01003276 address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003277}
3278
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003279void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003280{
Peter Maydell50013112015-04-26 16:49:24 +01003281 address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003282}
3283
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003284void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003285{
Peter Maydell50013112015-04-26 16:49:24 +01003286 address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003287}
3288
aliguori5e2972f2009-03-28 17:51:36 +00003289/* virtual memory access for debug (includes writing to ROM) */
Andreas Färberf17ec442013-06-29 19:40:58 +02003290int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00003291 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003292{
3293 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02003294 hwaddr phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00003295 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00003296
3297 while (len > 0) {
3298 page = addr & TARGET_PAGE_MASK;
Andreas Färberf17ec442013-06-29 19:40:58 +02003299 phys_addr = cpu_get_phys_page_debug(cpu, page);
bellard13eb76e2004-01-24 15:23:36 +00003300 /* if no physical page mapped, return an error */
3301 if (phys_addr == -1)
3302 return -1;
3303 l = (page + TARGET_PAGE_SIZE) - addr;
3304 if (l > len)
3305 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00003306 phys_addr += (addr & ~TARGET_PAGE_MASK);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003307 if (is_write) {
3308 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
3309 } else {
Peter Maydell5c9eb022015-04-26 16:49:24 +01003310 address_space_rw(cpu->as, phys_addr, MEMTXATTRS_UNSPECIFIED,
3311 buf, l, 0);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003312 }
bellard13eb76e2004-01-24 15:23:36 +00003313 len -= l;
3314 buf += l;
3315 addr += l;
3316 }
3317 return 0;
3318}
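/* Usage sketch (illustrative): this is the accessor used by debug paths
 * such as the gdbstub and the monitor, since it walks the CPU's current
 * page tables via cpu_get_phys_page_debug() and can also patch ROM (e.g.
 * for breakpoints).  A hypothetical caller reading guest-virtual memory:
 *
 *     uint8_t insn[4];
 *     if (cpu_memory_rw_debug(cpu, vaddr, insn, sizeof(insn), 0) < 0) {
 *         ... address not mapped in the guest ...
 *     }
 *
 * vaddr is a stand-in for whatever virtual address the caller cares about.
 */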
Paul Brooka68fe892010-03-01 00:08:59 +00003319#endif
bellard13eb76e2004-01-24 15:23:36 +00003320
Blue Swirl8e4a4242013-01-06 18:30:17 +00003321/*
3322 * A helper function for the _utterly broken_ virtio device model to find out if
3323 * it's running on a big endian machine. Don't do this at home kids!
3324 */
Greg Kurz98ed8ec2014-06-24 19:26:29 +02003325bool target_words_bigendian(void);
3326bool target_words_bigendian(void)
Blue Swirl8e4a4242013-01-06 18:30:17 +00003327{
3328#if defined(TARGET_WORDS_BIGENDIAN)
3329 return true;
3330#else
3331 return false;
3332#endif
3333}
3334
Wen Congyang76f35532012-05-07 12:04:18 +08003335#ifndef CONFIG_USER_ONLY
Avi Kivitya8170e52012-10-23 12:30:10 +02003336bool cpu_physical_memory_is_io(hwaddr phys_addr)
Wen Congyang76f35532012-05-07 12:04:18 +08003337{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003338 MemoryRegion*mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003339 hwaddr l = 1;
Paolo Bonzini41063e12015-03-18 14:21:43 +01003340 bool res;
Wen Congyang76f35532012-05-07 12:04:18 +08003341
Paolo Bonzini41063e12015-03-18 14:21:43 +01003342 rcu_read_lock();
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003343 mr = address_space_translate(&address_space_memory,
3344 phys_addr, &phys_addr, &l, false);
Wen Congyang76f35532012-05-07 12:04:18 +08003345
Paolo Bonzini41063e12015-03-18 14:21:43 +01003346 res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
3347 rcu_read_unlock();
3348 return res;
Wen Congyang76f35532012-05-07 12:04:18 +08003349}
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003350
3351void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
3352{
3353 RAMBlock *block;
3354
Mike Day0dc3f442013-09-05 14:41:35 -04003355 rcu_read_lock();
3356 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02003357 func(block->host, block->offset, block->used_length, opaque);
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003358 }
Mike Day0dc3f442013-09-05 14:41:35 -04003359 rcu_read_unlock();
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003360}
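/* Usage sketch (illustrative, not part of the original file): callers such
 * as migration code pass a RAMBlockIterFunc that receives the host pointer,
 * the block's offset in the ram_addr_t space and its length.  A made-up
 * callback that just sums the registered RAM:
 *
 *     static void add_block_size(void *host, ram_addr_t offset,
 *                                ram_addr_t length, void *opaque)
 *     {
 *         uint64_t *total = opaque;
 *         *total += length;
 *     }
 *
 *     uint64_t total = 0;
 *     qemu_ram_foreach_block(add_block_size, &total);
 */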
Peter Maydellec3f8c92013-06-27 20:53:38 +01003361#endif