/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
static bool in_migration;

/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables. */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

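/* A subpage covers a single target page that is split between several
 * MemoryRegionSections.  sub_section[] maps each byte offset within the
 * page (SUBPAGE_IDX) to an index in the dispatch section table.
 */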
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

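/* Recursively fill the radix tree below *lp so that the *nb pages starting
 * at *index point to section number 'leaf'.  A block that is fully covered
 * at the current level is turned into a leaf entry directly.
 */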
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

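/* Look up the section covering 'addr' by walking the radix tree from 'lp',
 * honouring the 'skip' field of compacted nodes.  Returns the unassigned
 * section if the address is not mapped.
 */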
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

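/* Translate 'addr' in 'as' to an offset within a MemoryRegion, walking
 * through any IOMMUs on the way and clamping *plen so the result does not
 * cross a section or IOMMU page boundary.
 */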
/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(cpu->memory_dispatch,
                                               addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (VMStateSubsection[]) {
        {
            .vmsd = &vmstate_cpu_common_exception_index,
            .needed = cpu_common_exception_index_needed,
        } , {
            /* empty */
        }
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
{
    /* We only support one address space per cpu at the moment.  */
    assert(cpu->as == as);

    if (cpu->tcg_as_listener) {
        memory_listener_unregister(cpu->tcg_as_listener);
    } else {
        cpu->tcg_as_listener = g_new0(MemoryListener, 1);
    }
    cpu->tcg_as_listener->commit = tcg_commit;
    memory_listener_register(cpu->tcg_as_listener, as);
}
#endif

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&cpu->breakpoints);
    QTAILQ_INIT(&cpu->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
    cpu_reload_memory_map(cpu);
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)

{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        goto found;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *     rcu_read_lock()
     *     read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *     rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
                                     unsigned client)
{
    if (length == 0)
        return;
    cpu_physical_memory_clear_dirty_range_type(start, length, client);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }
}

static void cpu_physical_memory_set_dirty_tracking(bool enable)
{
    in_migration = enable;
}

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - section->address_space->dispatch->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
927 * Accelerators with unusual needs may need this. Hopefully, we can
928 * get rid of it eventually.
929 */
Igor Mammedova2b257d2014-10-31 16:38:37 +0000930void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
Markus Armbruster91138032013-07-31 15:11:08 +0200931{
932 phys_mem_alloc = alloc;
933}
934
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +0200935static uint16_t phys_section_add(PhysPageMap *map,
936 MemoryRegionSection *section)
Avi Kivity5312bd82012-02-12 18:32:55 +0200937{
Paolo Bonzini68f3f652013-05-07 11:30:23 +0200938 /* The physical section number is ORed with a page-aligned
939 * pointer to produce the iotlb entries. Thus it should
940 * never overflow into the page-aligned value.
941 */
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +0200942 assert(map->sections_nb < TARGET_PAGE_SIZE);
Paolo Bonzini68f3f652013-05-07 11:30:23 +0200943
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +0200944 if (map->sections_nb == map->sections_nb_alloc) {
945 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
946 map->sections = g_renew(MemoryRegionSection, map->sections,
947 map->sections_nb_alloc);
Avi Kivity5312bd82012-02-12 18:32:55 +0200948 }
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +0200949 map->sections[map->sections_nb] = *section;
Paolo Bonzinidfde4e62013-05-06 10:46:11 +0200950 memory_region_ref(section->mr);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +0200951 return map->sections_nb++;
Avi Kivity5312bd82012-02-12 18:32:55 +0200952}
953
Paolo Bonzini058bc4b2013-06-25 09:30:48 +0200954static void phys_section_destroy(MemoryRegion *mr)
955{
Paolo Bonzinidfde4e62013-05-06 10:46:11 +0200956 memory_region_unref(mr);
957
Paolo Bonzini058bc4b2013-06-25 09:30:48 +0200958 if (mr->subpage) {
959 subpage_t *subpage = container_of(mr, subpage_t, iomem);
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -0700960 object_unref(OBJECT(&subpage->iomem));
Paolo Bonzini058bc4b2013-06-25 09:30:48 +0200961 g_free(subpage);
962 }
963}
964
Paolo Bonzini60926662013-05-29 12:30:26 +0200965static void phys_sections_free(PhysPageMap *map)
Avi Kivity5312bd82012-02-12 18:32:55 +0200966{
Paolo Bonzini9affd6f2013-05-29 12:09:47 +0200967 while (map->sections_nb > 0) {
968 MemoryRegionSection *section = &map->sections[--map->sections_nb];
Paolo Bonzini058bc4b2013-06-25 09:30:48 +0200969 phys_section_destroy(section->mr);
970 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +0200971 g_free(map->sections);
972 g_free(map->nodes);
Avi Kivity5312bd82012-02-12 18:32:55 +0200973}
974
Avi Kivityac1970f2012-10-03 16:22:53 +0200975static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +0200976{
977 subpage_t *subpage;
Avi Kivitya8170e52012-10-23 12:30:10 +0200978 hwaddr base = section->offset_within_address_space
Avi Kivity0f0cb162012-02-13 17:14:32 +0200979 & TARGET_PAGE_MASK;
Michael S. Tsirkin97115a82013-11-13 20:08:19 +0200980 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +0200981 d->map.nodes, d->map.sections);
Avi Kivity0f0cb162012-02-13 17:14:32 +0200982 MemoryRegionSection subsection = {
983 .offset_within_address_space = base,
Paolo Bonzini052e87b2013-05-27 10:08:27 +0200984 .size = int128_make64(TARGET_PAGE_SIZE),
Avi Kivity0f0cb162012-02-13 17:14:32 +0200985 };
Avi Kivitya8170e52012-10-23 12:30:10 +0200986 hwaddr start, end;
Avi Kivity0f0cb162012-02-13 17:14:32 +0200987
Avi Kivityf3705d52012-03-08 16:16:34 +0200988 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
Avi Kivity0f0cb162012-02-13 17:14:32 +0200989
Avi Kivityf3705d52012-03-08 16:16:34 +0200990 if (!(existing->mr->subpage)) {
Jan Kiszkaacc9d802013-05-26 21:55:37 +0200991 subpage = subpage_init(d->as, base);
Edgar E. Iglesias3be91e82013-11-07 18:42:51 +0100992 subsection.address_space = d->as;
Avi Kivity0f0cb162012-02-13 17:14:32 +0200993 subsection.mr = &subpage->iomem;
Avi Kivityac1970f2012-10-03 16:22:53 +0200994 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +0200995 phys_section_add(&d->map, &subsection));
Avi Kivity0f0cb162012-02-13 17:14:32 +0200996 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +0200997 subpage = container_of(existing->mr, subpage_t, iomem);
Avi Kivity0f0cb162012-02-13 17:14:32 +0200998 }
999 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001000 end = start + int128_get64(section->size) - 1;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001001 subpage_register(subpage, start, end,
1002 phys_section_add(&d->map, section));
Avi Kivity0f0cb162012-02-13 17:14:32 +02001003}
1004
1005
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001006static void register_multipage(AddressSpaceDispatch *d,
1007 MemoryRegionSection *section)
bellard33417e72003-08-10 21:47:01 +00001008{
Avi Kivitya8170e52012-10-23 12:30:10 +02001009 hwaddr start_addr = section->offset_within_address_space;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001010 uint16_t section_index = phys_section_add(&d->map, section);
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001011 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1012 TARGET_PAGE_BITS));
Avi Kivitydd811242012-01-02 12:17:03 +02001013
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001014 assert(num_pages);
1015 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
bellard33417e72003-08-10 21:47:01 +00001016}
1017
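/* MemoryListener callback: add a section to the address space's next
 * dispatch tree, registering page-aligned middle parts as multipages and
 * any unaligned head or tail as subpages.
 */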
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path, Error **errp)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        error_setg_errno(errp, errno, "failed to get page size of file %s",
                         path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

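/* Allocate backing for a RAM block from a file in the -mem-path directory
 * (typically a hugetlbfs mount), mmap it and return the mapped address.
 */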
Alex Williamson04b16652010-07-02 11:13:17 -06001094static void *file_ram_alloc(RAMBlock *block,
1095 ram_addr_t memory,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001096 const char *path,
1097 Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001098{
1099 char *filename;
Peter Feiner8ca761f2013-03-04 13:54:25 -05001100 char *sanitized_name;
1101 char *c;
Hu Tao557529d2014-09-09 13:28:00 +08001102 void *area = NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001103 int fd;
Hu Tao557529d2014-09-09 13:28:00 +08001104 uint64_t hpagesize;
Hu Taofc7a5802014-09-09 13:28:01 +08001105 Error *local_err = NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001106
Hu Taofc7a5802014-09-09 13:28:01 +08001107 hpagesize = gethugepagesize(path, &local_err);
1108 if (local_err) {
1109 error_propagate(errp, local_err);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001110 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001111 }
Igor Mammedova2b257d2014-10-31 16:38:37 +00001112 block->mr->align = hpagesize;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001113
1114 if (memory < hpagesize) {
Hu Tao557529d2014-09-09 13:28:00 +08001115 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1116 "or larger than huge page size 0x%" PRIx64,
1117 memory, hpagesize);
1118 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001119 }
1120
1121 if (kvm_enabled() && !kvm_has_sync_mmu()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001122 error_setg(errp,
1123 "host lacks kvm mmu notifiers, -mem-path unsupported");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001124 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001125 }
1126
Peter Feiner8ca761f2013-03-04 13:54:25 -05001127 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
Peter Crosthwaite83234bf2014-08-14 23:54:29 -07001128 sanitized_name = g_strdup(memory_region_name(block->mr));
Peter Feiner8ca761f2013-03-04 13:54:25 -05001129 for (c = sanitized_name; *c != '\0'; c++) {
1130 if (*c == '/')
1131 *c = '_';
1132 }
1133
1134 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1135 sanitized_name);
1136 g_free(sanitized_name);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001137
1138 fd = mkstemp(filename);
1139 if (fd < 0) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001140 error_setg_errno(errp, errno,
1141 "unable to create backing store for hugepages");
Stefan Weile4ada482013-01-16 18:37:23 +01001142 g_free(filename);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001143 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001144 }
1145 unlink(filename);
Stefan Weile4ada482013-01-16 18:37:23 +01001146 g_free(filename);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001147
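    /*
     * Round the request up to a whole number of huge pages; for example,
     * with a 2 MiB huge page size a 5 MiB request becomes 6 MiB.  The mask
     * trick relies on hpagesize being a power of two, which huge page
     * sizes are.
     */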
1148 memory = (memory+hpagesize-1) & ~(hpagesize-1);
1149
1150 /*
1151 * ftruncate is not supported by hugetlbfs in older
1152 * hosts, so don't bother bailing out on errors.
1153 * If anything goes wrong with it under other filesystems,
1154 * mmap will fail.
1155 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001156 if (ftruncate(fd, memory)) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001157 perror("ftruncate");
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001158 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001159
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001160 area = mmap(0, memory, PROT_READ | PROT_WRITE,
1161 (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
1162 fd, 0);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001163 if (area == MAP_FAILED) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001164 error_setg_errno(errp, errno,
1165 "unable to map backing store for hugepages");
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001166 close(fd);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001167 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001168 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001169
1170 if (mem_prealloc) {
Paolo Bonzini38183312014-05-14 17:43:21 +08001171 os_mem_prealloc(fd, area, memory);
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001172 }
1173
Alex Williamson04b16652010-07-02 11:13:17 -06001174 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001175 return area;
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001176
1177error:
1178 if (mem_prealloc) {
Gonglei81b07352015-02-25 12:22:31 +08001179 error_report("%s", error_get_pretty(*errp));
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001180 exit(1);
1181 }
1182 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001183}
1184#endif
1185
Mike Day0dc3f442013-09-05 14:41:35 -04001186/* Called with the ramlist lock held. */
Alex Williamsond17b5282010-06-25 11:08:38 -06001187static ram_addr_t find_ram_offset(ram_addr_t size)
1188{
Alex Williamson04b16652010-07-02 11:13:17 -06001189 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001190 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001191
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001192 assert(size != 0); /* it would hand out same offset multiple times */
1193
Mike Day0dc3f442013-09-05 14:41:35 -04001194 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
Alex Williamson04b16652010-07-02 11:13:17 -06001195 return 0;
Mike Day0d53d9f2015-01-21 13:45:24 +01001196 }
Alex Williamson04b16652010-07-02 11:13:17 -06001197
Mike Day0dc3f442013-09-05 14:41:35 -04001198 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001199 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001200
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001201 end = block->offset + block->max_length;
Alex Williamson04b16652010-07-02 11:13:17 -06001202
Mike Day0dc3f442013-09-05 14:41:35 -04001203 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001204 if (next_block->offset >= end) {
1205 next = MIN(next, next_block->offset);
1206 }
1207 }
1208 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001209 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001210 mingap = next - end;
1211 }
1212 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001213
1214 if (offset == RAM_ADDR_MAX) {
1215 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1216 (uint64_t)size);
1217 abort();
1218 }
1219
Alex Williamson04b16652010-07-02 11:13:17 -06001220 return offset;
1221}
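/*
 * Illustrative walk-through of the gap search above (block layout made up
 * for the example): with existing blocks covering [0, 0x1000) and
 * [0x3000, 0x4000), a request for 0x1000 bytes considers the candidate
 * ends 0x1000 and 0x4000.  The gap after 0x1000 is 0x2000 bytes and the
 * one after 0x4000 is effectively unbounded, so the smaller gap that still
 * fits wins and the new block is placed at offset 0x1000.
 */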
1222
Juan Quintela652d7ec2012-07-20 10:37:54 +02001223ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001224{
Alex Williamsond17b5282010-06-25 11:08:38 -06001225 RAMBlock *block;
1226 ram_addr_t last = 0;
1227
Mike Day0dc3f442013-09-05 14:41:35 -04001228 rcu_read_lock();
1229 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001230 last = MAX(last, block->offset + block->max_length);
Mike Day0d53d9f2015-01-21 13:45:24 +01001231 }
Mike Day0dc3f442013-09-05 14:41:35 -04001232 rcu_read_unlock();
Alex Williamsond17b5282010-06-25 11:08:38 -06001233 return last;
1234}
1235
Jason Baronddb97f12012-08-02 15:44:16 -04001236static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1237{
1238 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001239
1240 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
Marcel Apfelbaum47c8ca52015-02-04 17:43:54 +02001241 if (!machine_dump_guest_core(current_machine)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001242 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1243 if (ret) {
1244 perror("qemu_madvise");
1245 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1246 "but dump_guest_core=off specified\n");
1247 }
1248 }
1249}
1250
Mike Day0dc3f442013-09-05 14:41:35 -04001251/* Called within an RCU critical section, or while the ramlist lock
1252 * is held.
1253 */
Hu Tao20cfe882014-04-02 15:13:26 +08001254static RAMBlock *find_ram_block(ram_addr_t addr)
Cam Macdonell84b89d72010-07-26 18:10:57 -06001255{
Hu Tao20cfe882014-04-02 15:13:26 +08001256 RAMBlock *block;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001257
Mike Day0dc3f442013-09-05 14:41:35 -04001258 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001259 if (block->offset == addr) {
Hu Tao20cfe882014-04-02 15:13:26 +08001260 return block;
Avi Kivityc5705a72011-12-20 15:59:12 +02001261 }
1262 }
Hu Tao20cfe882014-04-02 15:13:26 +08001263
1264 return NULL;
1265}
1266
Mike Dayae3a7042013-09-05 14:41:35 -04001267/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001268void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1269{
Mike Dayae3a7042013-09-05 14:41:35 -04001270 RAMBlock *new_block, *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001271
Mike Day0dc3f442013-09-05 14:41:35 -04001272 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001273 new_block = find_ram_block(addr);
Avi Kivityc5705a72011-12-20 15:59:12 +02001274 assert(new_block);
1275 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001276
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001277 if (dev) {
1278 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001279 if (id) {
1280 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001281 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001282 }
1283 }
1284 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1285
Mike Day0dc3f442013-09-05 14:41:35 -04001286 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001287 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001288 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1289 new_block->idstr);
1290 abort();
1291 }
1292 }
Mike Day0dc3f442013-09-05 14:41:35 -04001293 rcu_read_unlock();
Avi Kivityc5705a72011-12-20 15:59:12 +02001294}
1295
Mike Dayae3a7042013-09-05 14:41:35 -04001296/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001297void qemu_ram_unset_idstr(ram_addr_t addr)
1298{
Mike Dayae3a7042013-09-05 14:41:35 -04001299 RAMBlock *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001300
Mike Dayae3a7042013-09-05 14:41:35 -04001301 /* FIXME: arch_init.c assumes that this is not called throughout
1302 * migration. Ignore the problem since hot-unplug during migration
1303 * does not work anyway.
1304 */
1305
Mike Day0dc3f442013-09-05 14:41:35 -04001306 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001307 block = find_ram_block(addr);
Hu Tao20cfe882014-04-02 15:13:26 +08001308 if (block) {
1309 memset(block->idstr, 0, sizeof(block->idstr));
1310 }
Mike Day0dc3f442013-09-05 14:41:35 -04001311 rcu_read_unlock();
Hu Tao20cfe882014-04-02 15:13:26 +08001312}
1313
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001314static int memory_try_enable_merging(void *addr, size_t len)
1315{
Marcel Apfelbaum75cc7f02015-02-04 17:43:55 +02001316 if (!machine_mem_merge(current_machine)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001317 /* disabled by the user */
1318 return 0;
1319 }
1320
1321 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1322}
1323
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001324/* Only legal before the guest might have detected the memory size: e.g. on
1325 * incoming migration, or right after reset.
1326 *
1327 * As the memory core doesn't know how the memory is accessed, it is up to
1328 * the resize callback to update device state and/or add assertions to detect
1329 * misuse, if necessary.
1330 */
1331int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
1332{
1333 RAMBlock *block = find_ram_block(base);
1334
1335 assert(block);
1336
Michael S. Tsirkin129ddaf2015-02-17 10:15:30 +01001337 newsize = TARGET_PAGE_ALIGN(newsize);
1338
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001339 if (block->used_length == newsize) {
1340 return 0;
1341 }
1342
1343 if (!(block->flags & RAM_RESIZEABLE)) {
1344 error_setg_errno(errp, EINVAL,
1345 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1346 " in != 0x" RAM_ADDR_FMT, block->idstr,
1347 newsize, block->used_length);
1348 return -EINVAL;
1349 }
1350
1351 if (block->max_length < newsize) {
1352 error_setg_errno(errp, EINVAL,
1353 "Length too large: %s: 0x" RAM_ADDR_FMT
1354 " > 0x" RAM_ADDR_FMT, block->idstr,
1355 newsize, block->max_length);
1356 return -EINVAL;
1357 }
1358
1359 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1360 block->used_length = newsize;
1361 cpu_physical_memory_set_dirty_range(block->offset, block->used_length);
1362 memory_region_set_size(block->mr, newsize);
1363 if (block->resized) {
1364 block->resized(block->idstr, newsize, block->host);
1365 }
1366 return 0;
1367}
1368
Hu Taoef701d72014-09-09 13:27:54 +08001369static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
Avi Kivityc5705a72011-12-20 15:59:12 +02001370{
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001371 RAMBlock *block;
Mike Day0d53d9f2015-01-21 13:45:24 +01001372 RAMBlock *last_block = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001373 ram_addr_t old_ram_size, new_ram_size;
1374
1375 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001376
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001377 qemu_mutex_lock_ramlist();
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001378 new_block->offset = find_ram_offset(new_block->max_length);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001379
1380 if (!new_block->host) {
1381 if (xen_enabled()) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001382 xen_ram_alloc(new_block->offset, new_block->max_length,
1383 new_block->mr);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001384 } else {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001385 new_block->host = phys_mem_alloc(new_block->max_length,
Igor Mammedova2b257d2014-10-31 16:38:37 +00001386 &new_block->mr->align);
Markus Armbruster39228252013-07-31 15:11:11 +02001387 if (!new_block->host) {
Hu Taoef701d72014-09-09 13:27:54 +08001388 error_setg_errno(errp, errno,
1389 "cannot set up guest memory '%s'",
1390 memory_region_name(new_block->mr));
1391 qemu_mutex_unlock_ramlist();
1392 return -1;
Markus Armbruster39228252013-07-31 15:11:11 +02001393 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001394 memory_try_enable_merging(new_block->host, new_block->max_length);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001395 }
1396 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001397
Mike Day0d53d9f2015-01-21 13:45:24 +01001398 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1399 * QLIST (which has an RCU-friendly variant) does not have insertion at
1400 * tail, so save the last element in last_block.
1401 */
Mike Day0dc3f442013-09-05 14:41:35 -04001402 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Mike Day0d53d9f2015-01-21 13:45:24 +01001403 last_block = block;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001404 if (block->max_length < new_block->max_length) {
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001405 break;
1406 }
1407 }
1408 if (block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001409 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001410 } else if (last_block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001411 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001412 } else { /* list is empty */
Mike Day0dc3f442013-09-05 14:41:35 -04001413 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001414 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001415 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001416
Mike Day0dc3f442013-09-05 14:41:35 -04001417 /* Write list before version */
1418 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001419 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001420 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001421
Juan Quintela2152f5c2013-10-08 13:52:02 +02001422 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1423
1424 if (new_ram_size > old_ram_size) {
Juan Quintela1ab4c8c2013-10-08 16:14:39 +02001425 int i;
Mike Dayae3a7042013-09-05 14:41:35 -04001426
1427 /* ram_list.dirty_memory[] is protected by the iothread lock. */
Juan Quintela1ab4c8c2013-10-08 16:14:39 +02001428 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1429 ram_list.dirty_memory[i] =
1430 bitmap_zero_extend(ram_list.dirty_memory[i],
1431 old_ram_size, new_ram_size);
1432 }
Juan Quintela2152f5c2013-10-08 13:52:02 +02001433 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001434 cpu_physical_memory_set_dirty_range(new_block->offset,
1435 new_block->used_length);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001436
Paolo Bonzinia904c912015-01-21 16:18:35 +01001437 if (new_block->host) {
1438 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1439 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1440 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1441 if (kvm_enabled()) {
1442 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1443 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001444 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001445
1446 return new_block->offset;
1447}
1448
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001449#ifdef __linux__
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001450ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001451 bool share, const char *mem_path,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001452 Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001453{
1454 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001455 ram_addr_t addr;
1456 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001457
1458 if (xen_enabled()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001459 error_setg(errp, "-mem-path not supported with Xen");
1460 return -1;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001461 }
1462
1463 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1464 /*
1465 * file_ram_alloc() needs to allocate just like
1466 * phys_mem_alloc, but we haven't bothered to provide
1467 * a hook there.
1468 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001469 error_setg(errp,
1470 "-mem-path not supported with this accelerator");
1471 return -1;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001472 }
1473
1474 size = TARGET_PAGE_ALIGN(size);
1475 new_block = g_malloc0(sizeof(*new_block));
1476 new_block->mr = mr;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001477 new_block->used_length = size;
1478 new_block->max_length = size;
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001479 new_block->flags = share ? RAM_SHARED : 0;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001480 new_block->host = file_ram_alloc(new_block, size,
1481 mem_path, errp);
1482 if (!new_block->host) {
1483 g_free(new_block);
1484 return -1;
1485 }
1486
Hu Taoef701d72014-09-09 13:27:54 +08001487 addr = ram_block_add(new_block, &local_err);
1488 if (local_err) {
1489 g_free(new_block);
1490 error_propagate(errp, local_err);
1491 return -1;
1492 }
1493 return addr;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001494}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001495#endif
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001496
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001497static
1498ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1499 void (*resized)(const char*,
1500 uint64_t length,
1501 void *host),
1502 void *host, bool resizeable,
Hu Taoef701d72014-09-09 13:27:54 +08001503 MemoryRegion *mr, Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001504{
1505 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001506 ram_addr_t addr;
1507 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001508
1509 size = TARGET_PAGE_ALIGN(size);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001510 max_size = TARGET_PAGE_ALIGN(max_size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001511 new_block = g_malloc0(sizeof(*new_block));
1512 new_block->mr = mr;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001513 new_block->resized = resized;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001514 new_block->used_length = size;
1515 new_block->max_length = max_size;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001516 assert(max_size >= size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001517 new_block->fd = -1;
1518 new_block->host = host;
1519 if (host) {
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001520 new_block->flags |= RAM_PREALLOC;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001521 }
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001522 if (resizeable) {
1523 new_block->flags |= RAM_RESIZEABLE;
1524 }
Hu Taoef701d72014-09-09 13:27:54 +08001525 addr = ram_block_add(new_block, &local_err);
1526 if (local_err) {
1527 g_free(new_block);
1528 error_propagate(errp, local_err);
1529 return -1;
1530 }
1531 return addr;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001532}
1533
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001534ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1535 MemoryRegion *mr, Error **errp)
1536{
1537 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1538}
1539
Hu Taoef701d72014-09-09 13:27:54 +08001540ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
pbrook94a6b542009-04-11 17:15:54 +00001541{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001542 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1543}
1544
1545ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
1546 void (*resized)(const char*,
1547 uint64_t length,
1548 void *host),
1549 MemoryRegion *mr, Error **errp)
1550{
1551 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
pbrook94a6b542009-04-11 17:15:54 +00001552}
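/*
 * Illustrative caller-side sketch of the resizeable variant (the callback
 * name, the sizes and 'mr' are hypothetical, not taken from a real device):
 *
 *     static void my_ram_resized(const char *id, uint64_t length, void *host)
 *     {
 *         // refresh any device state that depends on the used length
 *     }
 *
 *     Error *err = NULL;
 *     ram_addr_t base = qemu_ram_alloc_resizeable(16 * 1024 * 1024,
 *                                                 64 * 1024 * 1024,
 *                                                 my_ram_resized, mr, &err);
 *     ...
 *     qemu_ram_resize(base, 32 * 1024 * 1024, &err);
 *
 * The resize succeeds only for blocks created through this path (they carry
 * RAM_RESIZEABLE) and only up to the max size given at allocation time.
 */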
bellarde9a1ab12007-02-08 23:08:38 +00001553
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001554void qemu_ram_free_from_ptr(ram_addr_t addr)
1555{
1556 RAMBlock *block;
1557
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001558 qemu_mutex_lock_ramlist();
Mike Day0dc3f442013-09-05 14:41:35 -04001559 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001560 if (addr == block->offset) {
Mike Day0dc3f442013-09-05 14:41:35 -04001561 QLIST_REMOVE_RCU(block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001562 ram_list.mru_block = NULL;
Mike Day0dc3f442013-09-05 14:41:35 -04001563 /* Write list before version */
1564 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001565 ram_list.version++;
Paolo Bonzini43771532013-09-09 17:58:40 +02001566 g_free_rcu(block, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001567 break;
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001568 }
1569 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001570 qemu_mutex_unlock_ramlist();
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001571}
1572
Paolo Bonzini43771532013-09-09 17:58:40 +02001573static void reclaim_ramblock(RAMBlock *block)
1574{
1575 if (block->flags & RAM_PREALLOC) {
1576 ;
1577 } else if (xen_enabled()) {
1578 xen_invalidate_map_cache_entry(block->host);
1579#ifndef _WIN32
1580 } else if (block->fd >= 0) {
1581 munmap(block->host, block->max_length);
1582 close(block->fd);
1583#endif
1584 } else {
1585 qemu_anon_ram_free(block->host, block->max_length);
1586 }
1587 g_free(block);
1588}
1589
Anthony Liguoric227f092009-10-01 16:12:16 -05001590void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00001591{
Alex Williamson04b16652010-07-02 11:13:17 -06001592 RAMBlock *block;
1593
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001594 qemu_mutex_lock_ramlist();
Mike Day0dc3f442013-09-05 14:41:35 -04001595 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001596 if (addr == block->offset) {
Mike Day0dc3f442013-09-05 14:41:35 -04001597 QLIST_REMOVE_RCU(block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001598 ram_list.mru_block = NULL;
Mike Day0dc3f442013-09-05 14:41:35 -04001599 /* Write list before version */
1600 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001601 ram_list.version++;
Paolo Bonzini43771532013-09-09 17:58:40 +02001602 call_rcu(block, reclaim_ramblock, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001603 break;
Alex Williamson04b16652010-07-02 11:13:17 -06001604 }
1605 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001606 qemu_mutex_unlock_ramlist();
bellarde9a1ab12007-02-08 23:08:38 +00001607}
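/*
 * Note that qemu_ram_free() only unlinks the block and bumps the list
 * version; the backing memory itself is unmapped or freed later, in
 * reclaim_ramblock(), once call_rcu() guarantees that no reader can still
 * be walking the old list and dereferencing the block.
 */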
1608
Huang Yingcd19cfa2011-03-02 08:56:19 +01001609#ifndef _WIN32
1610void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1611{
1612 RAMBlock *block;
1613 ram_addr_t offset;
1614 int flags;
1615 void *area, *vaddr;
1616
Mike Day0dc3f442013-09-05 14:41:35 -04001617 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001618 offset = addr - block->offset;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001619 if (offset < block->max_length) {
Michael S. Tsirkin1240be22014-11-12 11:44:41 +02001620 vaddr = ramblock_ptr(block, offset);
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001621 if (block->flags & RAM_PREALLOC) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001622 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001623 } else if (xen_enabled()) {
1624 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001625 } else {
1626 flags = MAP_FIXED;
Markus Armbruster3435f392013-07-31 15:11:07 +02001627 if (block->fd >= 0) {
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001628 flags |= (block->flags & RAM_SHARED ?
1629 MAP_SHARED : MAP_PRIVATE);
Markus Armbruster3435f392013-07-31 15:11:07 +02001630 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1631 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001632 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001633 /*
1634 * Remap needs to match alloc. Accelerators that
1635 * set phys_mem_alloc never remap. If they did,
1636 * we'd need a remap hook here.
1637 */
1638 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1639
Huang Yingcd19cfa2011-03-02 08:56:19 +01001640 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1641 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1642 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001643 }
1644 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001645 fprintf(stderr, "Could not remap addr: "
1646 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001647 length, addr);
1648 exit(1);
1649 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001650 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001651 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001652 }
Huang Yingcd19cfa2011-03-02 08:56:19 +01001653 }
1654 }
1655}
1656#endif /* !_WIN32 */
1657
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001658int qemu_get_ram_fd(ram_addr_t addr)
1659{
Mike Dayae3a7042013-09-05 14:41:35 -04001660 RAMBlock *block;
1661 int fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001662
Mike Day0dc3f442013-09-05 14:41:35 -04001663 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001664 block = qemu_get_ram_block(addr);
1665 fd = block->fd;
Mike Day0dc3f442013-09-05 14:41:35 -04001666 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001667 return fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001668}
1669
Damjan Marion3fd74b82014-06-26 23:01:32 +02001670void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1671{
Mike Dayae3a7042013-09-05 14:41:35 -04001672 RAMBlock *block;
1673 void *ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001674
Mike Day0dc3f442013-09-05 14:41:35 -04001675 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001676 block = qemu_get_ram_block(addr);
1677 ptr = ramblock_ptr(block, 0);
Mike Day0dc3f442013-09-05 14:41:35 -04001678 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001679 return ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001680}
1681
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001682/* Return a host pointer to ram allocated with qemu_ram_alloc.
Mike Dayae3a7042013-09-05 14:41:35 -04001683 * This should not be used for general purpose DMA. Use address_space_map
1684 * or address_space_rw instead. For local memory (e.g. video ram) that the
1685 * device owns, use memory_region_get_ram_ptr.
Mike Day0dc3f442013-09-05 14:41:35 -04001686 *
1687 * By the time this function returns, the returned pointer is not protected
1688 * by RCU anymore. If the caller is not within an RCU critical section and
1689 * does not hold the iothread lock, it must have other means of protecting the
1690 * pointer, such as a reference to the region that includes the incoming
1691 * ram_addr_t.
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001692 */
1693void *qemu_get_ram_ptr(ram_addr_t addr)
1694{
Mike Dayae3a7042013-09-05 14:41:35 -04001695 RAMBlock *block;
1696 void *ptr;
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001697
Mike Day0dc3f442013-09-05 14:41:35 -04001698 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001699 block = qemu_get_ram_block(addr);
1700
1701 if (xen_enabled() && block->host == NULL) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001702 /* We need to check if the requested address is in the RAM
1703 * because we don't want to map the entire memory in QEMU.
1704 * In that case just map until the end of the page.
1705 */
1706 if (block->offset == 0) {
Mike Dayae3a7042013-09-05 14:41:35 -04001707 ptr = xen_map_cache(addr, 0, 0);
Mike Day0dc3f442013-09-05 14:41:35 -04001708 goto unlock;
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001709 }
Mike Dayae3a7042013-09-05 14:41:35 -04001710
1711 block->host = xen_map_cache(block->offset, block->max_length, 1);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001712 }
Mike Dayae3a7042013-09-05 14:41:35 -04001713 ptr = ramblock_ptr(block, addr - block->offset);
1714
Mike Day0dc3f442013-09-05 14:41:35 -04001715unlock:
1716 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001717 return ptr;
pbrookdc828ca2009-04-09 22:21:07 +00001718}
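/*
 * A minimal caller-side sketch of the rule described above (hypothetical
 * caller, not taken from real code):
 *
 *     rcu_read_lock();
 *     void *p = qemu_get_ram_ptr(addr);
 *     memcpy(p, buf, len);              // only touch p inside the section
 *     rcu_read_unlock();
 *
 * Alternatively, hold a reference to the MemoryRegion that covers 'addr'
 * for as long as the pointer is in use.
 */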
1719
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001720/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
Mike Dayae3a7042013-09-05 14:41:35 -04001721 * but takes a size argument.
Mike Day0dc3f442013-09-05 14:41:35 -04001722 *
1723 * By the time this function returns, the returned pointer is not protected
1724 * by RCU anymore. If the caller is not within an RCU critical section and
1725 * does not hold the iothread lock, it must have other means of protecting the
1726 * pointer, such as a reference to the region that includes the incoming
1727 * ram_addr_t.
Mike Dayae3a7042013-09-05 14:41:35 -04001728 */
Peter Maydellcb85f7a2013-07-08 09:44:04 +01001729static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001730{
Mike Dayae3a7042013-09-05 14:41:35 -04001731 void *ptr;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001732 if (*size == 0) {
1733 return NULL;
1734 }
Jan Kiszka868bb332011-06-21 22:59:09 +02001735 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001736 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02001737 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001738 RAMBlock *block;
Mike Day0dc3f442013-09-05 14:41:35 -04001739 rcu_read_lock();
1740 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001741 if (addr - block->offset < block->max_length) {
1742 if (addr - block->offset + *size > block->max_length)
1743 *size = block->max_length - addr + block->offset;
Mike Dayae3a7042013-09-05 14:41:35 -04001744 ptr = ramblock_ptr(block, addr - block->offset);
Mike Day0dc3f442013-09-05 14:41:35 -04001745 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001746 return ptr;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001747 }
1748 }
1749
1750 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1751 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001752 }
1753}
1754
Paolo Bonzini7443b432013-06-03 12:44:02 +02001755/* Some of the softmmu routines need to translate from a host pointer
Mike Dayae3a7042013-09-05 14:41:35 -04001756 * (typically a TLB entry) back to a ram offset.
1757 *
1758 * By the time this function returns, the returned pointer is not protected
1759 * by RCU anymore. If the caller is not within an RCU critical section and
1760 * does not hold the iothread lock, it must have other means of protecting the
1761 * pointer, such as a reference to the region that includes the incoming
1762 * ram_addr_t.
1763 */
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001764MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00001765{
pbrook94a6b542009-04-11 17:15:54 +00001766 RAMBlock *block;
1767 uint8_t *host = ptr;
Mike Dayae3a7042013-09-05 14:41:35 -04001768 MemoryRegion *mr;
pbrook94a6b542009-04-11 17:15:54 +00001769
Jan Kiszka868bb332011-06-21 22:59:09 +02001770 if (xen_enabled()) {
Mike Day0dc3f442013-09-05 14:41:35 -04001771 rcu_read_lock();
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001772 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Mike Dayae3a7042013-09-05 14:41:35 -04001773 mr = qemu_get_ram_block(*ram_addr)->mr;
Mike Day0dc3f442013-09-05 14:41:35 -04001774 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001775 return mr;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001776 }
1777
Mike Day0dc3f442013-09-05 14:41:35 -04001778 rcu_read_lock();
1779 block = atomic_rcu_read(&ram_list.mru_block);
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001780 if (block && block->host && host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001781 goto found;
1782 }
1783
Mike Day0dc3f442013-09-05 14:41:35 -04001784 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001785 /* This case appears when the block is not mapped. */
1786 if (block->host == NULL) {
1787 continue;
1788 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001789 if (host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001790 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001791 }
pbrook94a6b542009-04-11 17:15:54 +00001792 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001793
Mike Day0dc3f442013-09-05 14:41:35 -04001794 rcu_read_unlock();
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001795 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001796
1797found:
1798 *ram_addr = block->offset + (host - block->host);
Mike Dayae3a7042013-09-05 14:41:35 -04001799 mr = block->mr;
Mike Day0dc3f442013-09-05 14:41:35 -04001800 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001801 return mr;
Marcelo Tosattie8902612010-10-11 15:31:19 -03001802}
Alex Williamsonf471a172010-06-11 11:11:42 -06001803
Avi Kivitya8170e52012-10-23 12:30:10 +02001804static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001805 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001806{
Juan Quintela52159192013-10-08 12:44:04 +02001807 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001808 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00001809 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001810 switch (size) {
1811 case 1:
1812 stb_p(qemu_get_ram_ptr(ram_addr), val);
1813 break;
1814 case 2:
1815 stw_p(qemu_get_ram_ptr(ram_addr), val);
1816 break;
1817 case 4:
1818 stl_p(qemu_get_ram_ptr(ram_addr), val);
1819 break;
1820 default:
1821 abort();
1822 }
Paolo Bonzini68868672014-07-21 16:45:18 +02001823 cpu_physical_memory_set_dirty_range_nocode(ram_addr, size);
bellardf23db162005-08-21 19:12:28 +00001824 /* we remove the notdirty callback only if the code has been
1825 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02001826 if (!cpu_physical_memory_is_clean(ram_addr)) {
Andreas Färber4917cf42013-05-27 05:17:50 +02001827 CPUArchState *env = current_cpu->env_ptr;
Andreas Färber93afead2013-08-26 03:41:01 +02001828 tlb_set_dirty(env, current_cpu->mem_io_vaddr);
Andreas Färber4917cf42013-05-27 05:17:50 +02001829 }
bellard1ccde1c2004-02-06 19:46:14 +00001830}
1831
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001832static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1833 unsigned size, bool is_write)
1834{
1835 return is_write;
1836}
1837
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001838static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001839 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001840 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001841 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00001842};
1843
pbrook0f459d12008-06-09 00:20:13 +00001844/* Generate a debug exception if a watchpoint has been hit. */
Peter Maydell66b9b432015-04-26 16:49:24 +01001845static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
pbrook0f459d12008-06-09 00:20:13 +00001846{
Andreas Färber93afead2013-08-26 03:41:01 +02001847 CPUState *cpu = current_cpu;
1848 CPUArchState *env = cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00001849 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00001850 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00001851 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00001852 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00001853
Andreas Färberff4700b2013-08-26 18:23:18 +02001854 if (cpu->watchpoint_hit) {
aliguori06d55cc2008-11-18 20:24:06 +00001855 /* We re-entered the check after replacing the TB. Now raise
1856 * the debug interrupt so that it will trigger after the
1857 * current instruction. */
Andreas Färber93afead2013-08-26 03:41:01 +02001858 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00001859 return;
1860 }
Andreas Färber93afead2013-08-26 03:41:01 +02001861 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Andreas Färberff4700b2013-08-26 18:23:18 +02001862 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Peter Maydell05068c02014-09-12 14:06:48 +01001863 if (cpu_watchpoint_address_matches(wp, vaddr, len)
1864 && (wp->flags & flags)) {
Peter Maydell08225672014-09-12 14:06:48 +01001865 if (flags == BP_MEM_READ) {
1866 wp->flags |= BP_WATCHPOINT_HIT_READ;
1867 } else {
1868 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
1869 }
1870 wp->hitaddr = vaddr;
Peter Maydell66b9b432015-04-26 16:49:24 +01001871 wp->hitattrs = attrs;
Andreas Färberff4700b2013-08-26 18:23:18 +02001872 if (!cpu->watchpoint_hit) {
1873 cpu->watchpoint_hit = wp;
Andreas Färber239c51a2013-09-01 17:12:23 +02001874 tb_check_watchpoint(cpu);
aliguori6e140f22008-11-18 20:37:55 +00001875 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
Andreas Färber27103422013-08-26 08:31:06 +02001876 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +02001877 cpu_loop_exit(cpu);
aliguori6e140f22008-11-18 20:37:55 +00001878 } else {
1879 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
Andreas Färber648f0342013-09-01 17:43:17 +02001880 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
Andreas Färber0ea8cb82013-09-03 02:12:23 +02001881 cpu_resume_from_signal(cpu, NULL);
aliguori6e140f22008-11-18 20:37:55 +00001882 }
aliguori06d55cc2008-11-18 20:24:06 +00001883 }
aliguori6e140f22008-11-18 20:37:55 +00001884 } else {
1885 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00001886 }
1887 }
1888}
1889
pbrook6658ffb2007-03-16 23:58:11 +00001890/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1891 so these check for a hit then pass through to the normal out-of-line
1892 phys routines. */
Peter Maydell66b9b432015-04-26 16:49:24 +01001893static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
1894 unsigned size, MemTxAttrs attrs)
pbrook6658ffb2007-03-16 23:58:11 +00001895{
Peter Maydell66b9b432015-04-26 16:49:24 +01001896 MemTxResult res;
1897 uint64_t data;
pbrook6658ffb2007-03-16 23:58:11 +00001898
Peter Maydell66b9b432015-04-26 16:49:24 +01001899 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
Avi Kivity1ec9b902012-01-02 12:47:48 +02001900 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04001901 case 1:
Peter Maydell66b9b432015-04-26 16:49:24 +01001902 data = address_space_ldub(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04001903 break;
1904 case 2:
Peter Maydell66b9b432015-04-26 16:49:24 +01001905 data = address_space_lduw(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04001906 break;
1907 case 4:
Peter Maydell66b9b432015-04-26 16:49:24 +01001908 data = address_space_ldl(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04001909 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02001910 default: abort();
1911 }
Peter Maydell66b9b432015-04-26 16:49:24 +01001912 *pdata = data;
1913 return res;
1914}
1915
1916static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
1917 uint64_t val, unsigned size,
1918 MemTxAttrs attrs)
1919{
1920 MemTxResult res;
1921
1922 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
1923 switch (size) {
1924 case 1:
1925 address_space_stb(&address_space_memory, addr, val, attrs, &res);
1926 break;
1927 case 2:
1928 address_space_stw(&address_space_memory, addr, val, attrs, &res);
1929 break;
1930 case 4:
1931 address_space_stl(&address_space_memory, addr, val, attrs, &res);
1932 break;
1933 default: abort();
1934 }
1935 return res;
pbrook6658ffb2007-03-16 23:58:11 +00001936}
1937
Avi Kivity1ec9b902012-01-02 12:47:48 +02001938static const MemoryRegionOps watch_mem_ops = {
Peter Maydell66b9b432015-04-26 16:49:24 +01001939 .read_with_attrs = watch_mem_read,
1940 .write_with_attrs = watch_mem_write,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001941 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00001942};
pbrook6658ffb2007-03-16 23:58:11 +00001943
Peter Maydellf25a49e2015-04-26 16:49:24 +01001944static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
1945 unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00001946{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001947 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01001948 uint8_t buf[8];
Peter Maydell5c9eb022015-04-26 16:49:24 +01001949 MemTxResult res;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001950
blueswir1db7b5422007-05-26 17:36:03 +00001951#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001952 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001953 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00001954#endif
Peter Maydell5c9eb022015-04-26 16:49:24 +01001955 res = address_space_read(subpage->as, addr + subpage->base,
1956 attrs, buf, len);
1957 if (res) {
1958 return res;
Peter Maydellf25a49e2015-04-26 16:49:24 +01001959 }
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001960 switch (len) {
1961 case 1:
Peter Maydellf25a49e2015-04-26 16:49:24 +01001962 *data = ldub_p(buf);
1963 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001964 case 2:
Peter Maydellf25a49e2015-04-26 16:49:24 +01001965 *data = lduw_p(buf);
1966 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001967 case 4:
Peter Maydellf25a49e2015-04-26 16:49:24 +01001968 *data = ldl_p(buf);
1969 return MEMTX_OK;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01001970 case 8:
Peter Maydellf25a49e2015-04-26 16:49:24 +01001971 *data = ldq_p(buf);
1972 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001973 default:
1974 abort();
1975 }
blueswir1db7b5422007-05-26 17:36:03 +00001976}
1977
Peter Maydellf25a49e2015-04-26 16:49:24 +01001978static MemTxResult subpage_write(void *opaque, hwaddr addr,
1979 uint64_t value, unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00001980{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001981 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01001982 uint8_t buf[8];
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001983
blueswir1db7b5422007-05-26 17:36:03 +00001984#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001985 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001986 " value %"PRIx64"\n",
1987 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00001988#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001989 switch (len) {
1990 case 1:
1991 stb_p(buf, value);
1992 break;
1993 case 2:
1994 stw_p(buf, value);
1995 break;
1996 case 4:
1997 stl_p(buf, value);
1998 break;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01001999 case 8:
2000 stq_p(buf, value);
2001 break;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002002 default:
2003 abort();
2004 }
Peter Maydell5c9eb022015-04-26 16:49:24 +01002005 return address_space_write(subpage->as, addr + subpage->base,
2006 attrs, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00002007}
2008
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002009static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08002010 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002011{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002012 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002013#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002014 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002015 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002016#endif
2017
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002018 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08002019 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002020}
2021
Avi Kivity70c68e42012-01-02 12:32:48 +02002022static const MemoryRegionOps subpage_ops = {
Peter Maydellf25a49e2015-04-26 16:49:24 +01002023 .read_with_attrs = subpage_read,
2024 .write_with_attrs = subpage_write,
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002025 .impl.min_access_size = 1,
2026 .impl.max_access_size = 8,
2027 .valid.min_access_size = 1,
2028 .valid.max_access_size = 8,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002029 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02002030 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00002031};
2032
Anthony Liguoric227f092009-10-01 16:12:16 -05002033static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002034 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00002035{
2036 int idx, eidx;
2037
2038 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2039 return -1;
2040 idx = SUBPAGE_IDX(start);
2041 eidx = SUBPAGE_IDX(end);
2042#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002043 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2044 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00002045#endif
blueswir1db7b5422007-05-26 17:36:03 +00002046 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02002047 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00002048 }
2049
2050 return 0;
2051}
2052
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002053static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00002054{
Anthony Liguoric227f092009-10-01 16:12:16 -05002055 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00002056
Anthony Liguori7267c092011-08-20 22:09:37 -05002057 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00002058
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002059 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00002060 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002061 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002062 NULL, TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02002063 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00002064#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002065 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2066 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00002067#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02002068 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00002069
2070 return mmio;
2071}
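/*
 * Taken together, the helpers above let one target page be shared by
 * several MemoryRegionSections: subpage_init() creates the container for
 * the page starting at 'base', subpage_register() records which section
 * owns each byte range, and the subpage read/write callbacks simply bounce
 * the access back into the owning address space at base + addr, where the
 * normal lookup resolves it to the section registered for that offset.
 */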
2072
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002073static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2074 MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02002075{
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002076 assert(as);
Avi Kivity5312bd82012-02-12 18:32:55 +02002077 MemoryRegionSection section = {
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002078 .address_space = as,
Avi Kivity5312bd82012-02-12 18:32:55 +02002079 .mr = mr,
2080 .offset_within_address_space = 0,
2081 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002082 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02002083 };
2084
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002085 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02002086}
2087
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002088MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02002089{
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002090 AddressSpaceDispatch *d = atomic_rcu_read(&cpu->memory_dispatch);
2091 MemoryRegionSection *sections = d->map.sections;
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002092
2093 return sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02002094}
2095
Avi Kivitye9179ce2009-06-14 11:38:52 +03002096static void io_mem_init(void)
2097{
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002098 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002099 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002100 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002101 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002102 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002103 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002104 NULL, UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03002105}
2106
Avi Kivityac1970f2012-10-03 16:22:53 +02002107static void mem_begin(MemoryListener *listener)
2108{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002109 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002110 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2111 uint16_t n;
2112
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002113 n = dummy_section(&d->map, as, &io_mem_unassigned);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002114 assert(n == PHYS_SECTION_UNASSIGNED);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002115 n = dummy_section(&d->map, as, &io_mem_notdirty);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002116 assert(n == PHYS_SECTION_NOTDIRTY);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002117 n = dummy_section(&d->map, as, &io_mem_rom);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002118 assert(n == PHYS_SECTION_ROM);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002119 n = dummy_section(&d->map, as, &io_mem_watch);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002120 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02002121
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02002122 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02002123 d->as = as;
2124 as->next_dispatch = d;
2125}
2126
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002127static void address_space_dispatch_free(AddressSpaceDispatch *d)
2128{
2129 phys_sections_free(&d->map);
2130 g_free(d);
2131}
2132
Paolo Bonzini00752702013-05-29 12:13:54 +02002133static void mem_commit(MemoryListener *listener)
2134{
2135 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02002136 AddressSpaceDispatch *cur = as->dispatch;
2137 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02002138
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002139 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02002140
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002141 atomic_rcu_set(&as->dispatch, next);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002142 if (cur) {
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002143 call_rcu(cur, address_space_dispatch_free, rcu);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002144 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02002145}
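/*
 * The swap above is the usual RCU publish/reclaim pattern: readers pick up
 * the dispatch table with atomic_rcu_read() (see iotlb_to_region()), the
 * writer publishes the new table with atomic_rcu_set(), and the old table
 * is only destroyed by call_rcu() after every reader that might still see
 * it has left its critical section.
 */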
2146
Avi Kivity1d711482012-10-02 18:54:45 +02002147static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02002148{
Andreas Färber182735e2013-05-29 22:29:20 +02002149 CPUState *cpu;
Avi Kivity117712c2012-02-12 21:23:17 +02002150
2151 /* since each CPU stores ram addresses in its TLB cache, we must
2152 reset the modified entries */
2153 /* XXX: slow ! */
    CPU_FOREACH(cpu) {
        /* FIXME: Disentangle the cpu.h circular files deps so we can
           directly get the right CPU from listener.  */
        if (cpu->tcg_as_listener != listener) {
            continue;
        }
        cpu_reload_memory_map(cpu);
    }
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(true);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(false);
}

static MemoryListener core_memory_listener = {
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .priority = 1,
};

void address_space_init_dispatch(AddressSpace *as)
{
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .begin = mem_begin,
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    memory_listener_register(&as->dispatch_listener, as);
}

void address_space_unregister(AddressSpace *as)
{
    memory_listener_unregister(&as->dispatch_listener);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    atomic_rcu_set(&as->dispatch, NULL);
    if (d) {
        call_rcu(d, address_space_dispatch_free, rcu);
    }
}

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
                          65536);
    address_space_init(&address_space_io, system_io, "I/O");

    memory_listener_register(&core_memory_listener, &address_space_memory);
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else

static void invalidate_and_set_dirty(hwaddr addr,
                                     hwaddr length)
{
    if (cpu_physical_memory_range_includes_clean(addr, length)) {
        tb_invalidate_phys_range(addr, addr + length, 0);
        cpu_physical_memory_set_dirty_range_nocode(addr, length);
    }
    xen_modified_memory(addr, length);
}

static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    if (l & (l - 1)) {
        l = 1 << (qemu_fls(l) - 1);
    }

    return l;
}

MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                             uint8_t *buf, int len, bool is_write)
{
    hwaddr l;
    uint8_t *ptr;
    uint64_t val;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, is_write);

        if (is_write) {
            if (!memory_access_is_direct(mr, is_write)) {
                l = memory_access_size(mr, l, addr1);
                /* XXX: could force current_cpu to NULL to avoid
                   potential bugs */
                switch (l) {
                case 8:
                    /* 64 bit write access */
                    val = ldq_p(buf);
                    result |= memory_region_dispatch_write(mr, addr1, val, 8,
                                                           attrs);
                    break;
                case 4:
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    result |= memory_region_dispatch_write(mr, addr1, val, 4,
                                                           attrs);
                    break;
                case 2:
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    result |= memory_region_dispatch_write(mr, addr1, val, 2,
                                                           attrs);
                    break;
                case 1:
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    result |= memory_region_dispatch_write(mr, addr1, val, 1,
                                                           attrs);
                    break;
                default:
                    abort();
                }
            } else {
                addr1 += memory_region_get_ram_addr(mr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
            }
        } else {
            if (!memory_access_is_direct(mr, is_write)) {
                /* I/O case */
                l = memory_access_size(mr, l, addr1);
                switch (l) {
                case 8:
                    /* 64 bit read access */
                    result |= memory_region_dispatch_read(mr, addr1, &val, 8,
                                                          attrs);
                    stq_p(buf, val);
                    break;
                case 4:
                    /* 32 bit read access */
                    result |= memory_region_dispatch_read(mr, addr1, &val, 4,
                                                          attrs);
                    stl_p(buf, val);
                    break;
                case 2:
                    /* 16 bit read access */
                    result |= memory_region_dispatch_read(mr, addr1, &val, 2,
                                                          attrs);
                    stw_p(buf, val);
                    break;
                case 1:
                    /* 8 bit read access */
                    result |= memory_region_dispatch_read(mr, addr1, &val, 1,
                                                          attrs);
                    stb_p(buf, val);
                    break;
                default:
                    abort();
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();

    return result;
}

MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                                const uint8_t *buf, int len)
{
    return address_space_rw(as, addr, attrs, (uint8_t *)buf, len, true);
}

MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                               uint8_t *buf, int len)
{
    return address_space_rw(as, addr, attrs, buf, len, false);
}


void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}

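/*
 * Illustrative sketch (not part of the original file): how a device model
 * might use address_space_write()/address_space_read() and check the
 * MemTxResult instead of silently ignoring failed transactions.  The
 * example_dma_transfer() name and the guest addresses are made up.
 */
#if 0 /* example only */
static void example_dma_transfer(AddressSpace *as)
{
    uint8_t desc[16];
    MemTxResult res;

    /* Read a 16-byte descriptor from guest memory at 0x4000.  */
    res = address_space_read(as, 0x4000, MEMTXATTRS_UNSPECIFIED,
                             desc, sizeof(desc));
    if (res != MEMTX_OK) {
        /* Translation hit MMIO or a hole; report an error to the guest.  */
        return;
    }

    /* Write a completion status byte back at 0x4010.  */
    desc[0] = 1;
    res = address_space_write(as, 0x4010, MEMTXATTRS_UNSPECIFIED, desc, 1);
    if (res != MEMTX_OK) {
        return;
    }
}
#endif
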
enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            /* do nothing */
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();
}

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}

void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }

    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}

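/*
 * Illustrative sketch (not part of the original file): how a loader might
 * copy boot code into guest memory and then flush the host instruction cache
 * so a KVM/Xen guest sees the new code.  The function name and the load
 * address are made up.
 */
#if 0 /* example only */
static void example_load_boot_code(const uint8_t *code, int size)
{
    hwaddr load_addr = 0x100000;

    /* Works for both RAM and ROM regions, unlike a plain DMA write.  */
    cpu_physical_memory_write_rom(&address_space_memory, load_addr,
                                  code, size);
    /* No-op under TCG; flushes the host icache for KVM/Xen.  */
    cpu_flush_icache_range(load_addr, size);
}
#endif
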
typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
    bool in_use;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    QEMUBH *bh;
    QLIST_ENTRY(MapClient) link;
} MapClient;

QemuMutex map_client_list_lock;
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

static void cpu_unregister_map_client_do(MapClient *client)
{
    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients_locked(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        qemu_bh_schedule(client->bh);
        cpu_unregister_map_client_do(client);
    }
}

void cpu_register_map_client(QEMUBH *bh)
{
    MapClient *client = g_malloc(sizeof(*client));

    qemu_mutex_lock(&map_client_list_lock);
    client->bh = bh;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    if (!atomic_read(&bounce.in_use)) {
        cpu_notify_map_clients_locked();
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

void cpu_exec_init_all(void)
{
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
    qemu_mutex_init(&map_client_list_lock);
}

void cpu_unregister_map_client(QEMUBH *bh)
{
    MapClient *client;

    qemu_mutex_lock(&map_client_list_lock);
    QLIST_FOREACH(client, &map_client_list, link) {
        if (client->bh == bh) {
            cpu_unregister_map_client_do(client);
            break;
        }
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

static void cpu_notify_map_clients(void)
{
    qemu_mutex_lock(&map_client_list_lock);
    cpu_notify_map_clients_locked();
    qemu_mutex_unlock(&map_client_list_lock);
}

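/*
 * Illustrative sketch (not part of the original file): a caller that could
 * not get the bounce buffer from address_space_map() can register a bottom
 * half with cpu_register_map_client() and retry from there once the buffer
 * is released.  The retry_map_bh()/MyDMAState names are made up, and the
 * sketch assumes s->bh was created beforehand with the usual
 * qemu_bh_new(retry_map_bh, s) helper from the main loop.
 */
#if 0 /* example only */
typedef struct MyDMAState {
    QEMUBH *bh;     /* created with qemu_bh_new(retry_map_bh, s) */
    hwaddr addr;
    hwaddr len;
} MyDMAState;

static void retry_map_bh(void *opaque)
{
    MyDMAState *s = opaque;
    hwaddr len = s->len;
    void *mem = address_space_map(&address_space_memory, s->addr, &len, true);

    if (!mem) {
        /* Bounce buffer still busy; queue up to be notified again.  */
        cpu_register_map_client(s->bh);
        return;
    }
    /* ... fill mem ..., then release the mapping.  */
    address_space_unmap(&address_space_memory, mem, len, true, len);
}
#endif
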
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                rcu_read_unlock();
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    rcu_read_unlock();
    return true;
}

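/*
 * Illustrative sketch (not part of the original file): probing whether a
 * whole guest buffer is accessible before committing to a transfer, so a
 * device can fail a request up front rather than half-way through.  The
 * function name is made up.
 */
#if 0 /* example only */
static bool example_can_fill_buffer(AddressSpace *as, hwaddr addr, int len)
{
    /* We intend to write into guest memory, so check write access.  */
    return address_space_access_valid(as, addr, len, true);
}
#endif
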
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &l, is_write);

    if (!memory_access_is_direct(mr, is_write)) {
        if (atomic_xchg(&bounce.in_use, true)) {
            rcu_read_unlock();
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                               bounce.buffer, l);
        }

        rcu_read_unlock();
        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    rcu_read_unlock();
    *plen = done;
    return qemu_ram_ptr_length(raddr + base, plen);
}

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}

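/*
 * Illustrative sketch (not part of the original file): the canonical
 * map / access / unmap pattern for zero-copy access to guest memory, with
 * the NULL return (mapping resources exhausted) handled by falling back to
 * a copying read.  The function name and calling convention are made up.
 */
#if 0 /* example only */
static void example_peek_guest_buffer(AddressSpace *as, hwaddr addr,
                                      uint8_t *dst, hwaddr size)
{
    hwaddr plen = size;
    void *host = address_space_map(as, addr, &plen, false);

    if (!host) {
        /* Bounce buffer busy or region not mappable: do a plain read.  */
        address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED, dst, size);
        return;
    }
    /* plen may be smaller than requested if the region was not contiguous. */
    memcpy(dst, host, plen);
    address_space_unmap(as, host, plen, false, plen);
}
#endif
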
/* warning: addr must be aligned */
static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    rcu_read_unlock();
    return val;
}

uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    rcu_read_unlock();
    return val;
}

uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* XXX: optimize */
uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &val, 1, 0);
    if (result) {
        *result = r;
    }
    return val;
}

uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline uint32_t address_space_lduw_internal(AddressSpace *as,
                                                   hwaddr addr,
                                                   MemTxAttrs attrs,
                                                   MemTxResult *result,
                                                   enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    rcu_read_unlock();
    return val;
}

uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_BIG_ENDIAN);
}

uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
                                MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (cpu_physical_memory_is_clean(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_range_nocode(addr1, 4);
            }
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    rcu_read_unlock();
}

void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline void address_space_stl_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    rcu_read_unlock();
}

void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* XXX: optimize */
void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t v = val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &v, 1, 1);
    if (result) {
        *result = r;
    }
}

void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    rcu_read_unlock();
}

void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* XXX: optimize */
void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = tswap64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_le64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_be64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

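/*
 * Illustrative sketch (not part of the original file): device code can use
 * the endian-explicit helpers above so that a little-endian register layout
 * reads the same regardless of the target's TARGET_WORDS_BIGENDIAN setting.
 * The function name and register offsets are made up.
 */
#if 0 /* example only */
static uint64_t example_read_le_counter(AddressSpace *as, hwaddr base)
{
    /* Two 32-bit halves of a little-endian 64-bit counter at base + 0/4.  */
    uint64_t lo = ldl_le_phys(as, base);
    uint64_t hi = ldl_le_phys(as, base + 4);

    stl_le_phys(as, base + 8, 1);   /* made-up latch/acknowledge register */
    return (hi << 32) | lo;
}
#endif
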
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(cpu, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
        } else {
            address_space_rw(cpu->as, phys_addr, MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}

void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        func(block->host, block->offset, block->used_length, opaque);
    }
    rcu_read_unlock();
}
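
/*
 * Illustrative sketch (not part of the original file): a RAMBlockIterFunc
 * callback that just totals guest RAM, passed to qemu_ram_foreach_block().
 * The parameter order mirrors the func(block->host, block->offset,
 * block->used_length, opaque) call above; the names are made up.
 */
#if 0 /* example only */
static void example_sum_block(void *host_addr, ram_addr_t offset,
                              ram_addr_t length, void *opaque)
{
    uint64_t *total = opaque;

    *total += length;
}

static uint64_t example_total_ram(void)
{
    uint64_t total = 0;

    qemu_ram_foreach_block(example_sum_block, &total);
    return total;
}
#endif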
#endif