/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock(). Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

/* An extra page is mapped on top of this RAM.
 */
#define RAM_EXTRA (1 << 3)
#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting. */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables. */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];
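
/* With the constants above the dispatch map is a radix tree over
 * physical page numbers: each level consumes P_L2_BITS (9 bits) of the
 * page index, so with 4 KiB target pages (TARGET_PAGE_BITS == 12, an
 * assumption for illustration) P_L2_LEVELS is ((64 - 12 - 1) / 9) + 1
 * == 6 and every Node holds 512 entries.  An entry with skip == 0 is a
 * leaf whose ptr indexes the sections array; otherwise ptr indexes the
 * nodes array and skip says how many levels the hop crosses.
 */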

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
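
/* Sketch of the recursion above: at each level, a chunk of the
 * [*index, *index + *nb) page range that is aligned to and at least as
 * large as step == 1 << (level * P_L2_BITS) becomes a leaf entry right
 * at that level; any smaller or misaligned remainder recurses one level
 * down, so large aligned regions stay shallow in the tree.
 */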

/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}
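
/* Example of the skip arithmetic: if an entry's only child is a node
 * whose own only child sits one level further down, the two hops merge
 * into a single entry with skip == 2, so lookups jump two levels in one
 * step.  skip is a 6-bit field, but the guard above conservatively
 * refuses to let a combined skip reach 8.
 */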

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
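
/* Lookup sketch: the walk peels lp.skip levels off the level counter per
 * hop and indexes each node with the next P_L2_BITS of the page number
 * until it reaches a leaf (skip == 0).  The final range check matters
 * because compaction can route a lookup to a leaf whose section does not
 * actually cover addr; such addresses resolve to the unassigned section.
 */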

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions. When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly. For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine. If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}
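
/* A translation can hop through several address spaces: each IOMMU pass
 * rewrites the address via iotlb.translated_addr/addr_mask, clamps *plen
 * to the translated page, and continues in iotlb.target_as.  The loop
 * only terminates on a non-IOMMU region, or on a permission fault,
 * which yields io_mem_unassigned.
 */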

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(cpu->memory_dispatch,
                                               addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};
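
/* Each subsection above is transmitted only when its .needed callback
 * returns true, so a CPU whose exception_index is -1 produces exactly
 * the "cpu_common" stream an older QEMU understands; subsections are
 * the usual VMState mechanism for extending a wire format without
 * bumping version_id.
 */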

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
{
    /* We only support one address space per cpu at the moment. */
    assert(cpu->as == as);

    if (cpu->tcg_as_listener) {
        memory_listener_unregister(cpu->tcg_as_listener);
    } else {
        cpu->tcg_as_listener = g_new0(MemoryListener, 1);
    }
    cpu->tcg_as_listener->commit = tcg_commit;
    memory_listener_register(cpu->tcg_as_listener, as);
}
#endif

#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

static int cpu_get_free_index(Error **errp)
{
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
                   MAX_CPUMASK_BITS);
        return -1;
    }

    bitmap_set(cpu_index_map, cpu, 1);
    return cpu;
}

void cpu_exec_exit(CPUState *cpu)
{
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
        return;
    }

    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
    cpu->cpu_index = -1;
}
#else

static int cpu_get_free_index(Error **errp)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

void cpu_exec_exit(CPUState *cpu)
{
}
#endif

void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int cpu_index;
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
    cpu_reload_memory_map(cpu);
#endif

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, cpu->env_ptr);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint. */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint. */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference. */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints. */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
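
/* Example: a watchpoint with vaddr == 0x1000 and len == 8 has
 * wpend == 0x1007; a 4-byte access at 0x1006 has addrend == 0x1009.
 * Neither addr > wpend nor wp->vaddr > addrend holds, so the ranges
 * overlap.  Computing inclusive end addresses first keeps the test
 * correct even when a range ends at the very top of the address space.
 */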

#endif

/* Add a breakpoint. */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint. */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference. */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        goto found;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock. This
     * is what happens:
     *
     * mru_block = xxx
     * rcu_read_unlock()
     * xxx removed from list
     * rcu_read_lock()
     * read mru_block
     * mru_block = NULL;
     * call_rcu(reclaim_ramblock, xxx);
     * rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here. The block was already published
     * when it was placed into the list. Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block. */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    unsigned long end, page;
    bool dirty;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;
    dirty = bitmap_test_and_clear_atomic(ram_list.dirty_memory[client],
                                         page, end - page);

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM. */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines. */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
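
/* The returned iotlb value is overloaded: for RAM it is the page-aligned
 * ram_addr ORed with a special section number such as
 * PHYS_SECTION_NOTDIRTY, while for MMIO it is the section's index into
 * d->map.sections plus the in-page offset.  phys_section_add() below
 * asserts sections_nb < TARGET_PAGE_SIZE precisely so that a section
 * number always fits in the bits below the page-aligned part.
 */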
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this. Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}
1023
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001024static uint16_t phys_section_add(PhysPageMap *map,
1025 MemoryRegionSection *section)
Avi Kivity5312bd82012-02-12 18:32:55 +02001026{
Paolo Bonzini68f3f652013-05-07 11:30:23 +02001027 /* The physical section number is ORed with a page-aligned
1028 * pointer to produce the iotlb entries. Thus it should
1029 * never overflow into the page-aligned value.
1030 */
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001031 assert(map->sections_nb < TARGET_PAGE_SIZE);
Paolo Bonzini68f3f652013-05-07 11:30:23 +02001032
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001033 if (map->sections_nb == map->sections_nb_alloc) {
1034 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
1035 map->sections = g_renew(MemoryRegionSection, map->sections,
1036 map->sections_nb_alloc);
Avi Kivity5312bd82012-02-12 18:32:55 +02001037 }
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001038 map->sections[map->sections_nb] = *section;
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02001039 memory_region_ref(section->mr);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001040 return map->sections_nb++;
Avi Kivity5312bd82012-02-12 18:32:55 +02001041}
1042
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001043static void phys_section_destroy(MemoryRegion *mr)
1044{
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02001045 memory_region_unref(mr);
1046
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001047 if (mr->subpage) {
1048 subpage_t *subpage = container_of(mr, subpage_t, iomem);
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001049 object_unref(OBJECT(&subpage->iomem));
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001050 g_free(subpage);
1051 }
1052}
1053
Paolo Bonzini60926662013-05-29 12:30:26 +02001054static void phys_sections_free(PhysPageMap *map)
Avi Kivity5312bd82012-02-12 18:32:55 +02001055{
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001056 while (map->sections_nb > 0) {
1057 MemoryRegionSection *section = &map->sections[--map->sections_nb];
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001058 phys_section_destroy(section->mr);
1059 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001060 g_free(map->sections);
1061 g_free(map->nodes);
Avi Kivity5312bd82012-02-12 18:32:55 +02001062}
1063
Avi Kivityac1970f2012-10-03 16:22:53 +02001064static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001065{
1066 subpage_t *subpage;
Avi Kivitya8170e52012-10-23 12:30:10 +02001067 hwaddr base = section->offset_within_address_space
Avi Kivity0f0cb162012-02-13 17:14:32 +02001068 & TARGET_PAGE_MASK;
Michael S. Tsirkin97115a82013-11-13 20:08:19 +02001069 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001070 d->map.nodes, d->map.sections);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001071 MemoryRegionSection subsection = {
1072 .offset_within_address_space = base,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001073 .size = int128_make64(TARGET_PAGE_SIZE),
Avi Kivity0f0cb162012-02-13 17:14:32 +02001074 };
Avi Kivitya8170e52012-10-23 12:30:10 +02001075 hwaddr start, end;
Avi Kivity0f0cb162012-02-13 17:14:32 +02001076
Avi Kivityf3705d52012-03-08 16:16:34 +02001077 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001078
Avi Kivityf3705d52012-03-08 16:16:34 +02001079 if (!(existing->mr->subpage)) {
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001080 subpage = subpage_init(d->as, base);
Edgar E. Iglesias3be91e82013-11-07 18:42:51 +01001081 subsection.address_space = d->as;
Avi Kivity0f0cb162012-02-13 17:14:32 +02001082 subsection.mr = &subpage->iomem;
Avi Kivityac1970f2012-10-03 16:22:53 +02001083 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001084 phys_section_add(&d->map, &subsection));
Avi Kivity0f0cb162012-02-13 17:14:32 +02001085 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02001086 subpage = container_of(existing->mr, subpage_t, iomem);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001087 }
1088 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001089 end = start + int128_get64(section->size) - 1;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001090 subpage_register(subpage, start, end,
1091 phys_section_add(&d->map, section));
Avi Kivity0f0cb162012-02-13 17:14:32 +02001092}
1093
1094
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001095static void register_multipage(AddressSpaceDispatch *d,
1096 MemoryRegionSection *section)
bellard33417e72003-08-10 21:47:01 +00001097{
Avi Kivitya8170e52012-10-23 12:30:10 +02001098 hwaddr start_addr = section->offset_within_address_space;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001099 uint16_t section_index = phys_section_add(&d->map, section);
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001100 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1101 TARGET_PAGE_BITS));
Avi Kivitydd811242012-01-02 12:17:03 +02001102
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001103 assert(num_pages);
1104 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
bellard33417e72003-08-10 21:47:01 +00001105}
1106
Avi Kivityac1970f2012-10-03 16:22:53 +02001107static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001108{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001109 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini00752702013-05-29 12:13:54 +02001110 AddressSpaceDispatch *d = as->next_dispatch;
Paolo Bonzini99b9cc02013-05-27 13:18:01 +02001111 MemoryRegionSection now = *section, remain = *section;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001112 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001113
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001114 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1115 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1116 - now.offset_within_address_space;
1117
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001118 now.size = int128_min(int128_make64(left), now.size);
Avi Kivityac1970f2012-10-03 16:22:53 +02001119 register_subpage(d, &now);
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001120 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001121 now.size = int128_zero();
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001122 }
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001123 while (int128_ne(remain.size, now.size)) {
1124 remain.size = int128_sub(remain.size, now.size);
1125 remain.offset_within_address_space += int128_get64(now.size);
1126 remain.offset_within_region += int128_get64(now.size);
Tyler Hall69b67642012-07-25 18:45:04 -04001127 now = remain;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001128 if (int128_lt(remain.size, page_size)) {
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001129 register_subpage(d, &now);
Hu Tao88266242013-08-29 18:21:16 +08001130 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001131 now.size = page_size;
Avi Kivityac1970f2012-10-03 16:22:53 +02001132 register_subpage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001133 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001134 now.size = int128_and(now.size, int128_neg(page_size));
Avi Kivityac1970f2012-10-03 16:22:53 +02001135 register_multipage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001136 }
Avi Kivity0f0cb162012-02-13 17:14:32 +02001137 }
1138}
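/* A worked example of the split above (illustrative, assuming a 4 KiB
 * TARGET_PAGE_SIZE; the real value is target-dependent): a section
 * covering [0x1800, 0x4800) is registered in three pieces:
 *
 *     [0x1800, 0x2000)  unaligned head  -> register_subpage()
 *     [0x2000, 0x4000)  whole pages     -> register_multipage()
 *     [0x4000, 0x4800)  unaligned tail  -> register_subpage()
 *
 * A section that is already page-aligned on both ends takes only the
 * register_multipage() path.
 */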
1139
Sheng Yang62a27442010-01-26 19:21:16 +08001140void qemu_flush_coalesced_mmio_buffer(void)
1141{
1142 if (kvm_enabled())
1143 kvm_flush_coalesced_mmio_buffer();
1144}
1145
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001146void qemu_mutex_lock_ramlist(void)
1147{
1148 qemu_mutex_lock(&ram_list.mutex);
1149}
1150
1151void qemu_mutex_unlock_ramlist(void)
1152{
1153 qemu_mutex_unlock(&ram_list.mutex);
1154}
1155
Markus Armbrustere1e84ba2013-07-31 15:11:10 +02001156#ifdef __linux__
Marcelo Tosattic9027602010-03-01 20:25:08 -03001157
1158#include <sys/vfs.h>
1159
1160#define HUGETLBFS_MAGIC 0x958458f6
1161
Hu Taofc7a5802014-09-09 13:28:01 +08001162static long gethugepagesize(const char *path, Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001163{
1164 struct statfs fs;
1165 int ret;
1166
1167 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001168 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001169 } while (ret != 0 && errno == EINTR);
1170
1171 if (ret != 0) {
Hu Taofc7a5802014-09-09 13:28:01 +08001172 error_setg_errno(errp, errno, "failed to get page size of file %s",
1173 path);
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001174 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001175 }
1176
1177 if (fs.f_type != HUGETLBFS_MAGIC)
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001178 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001179
1180 return fs.f_bsize;
1181}
1182
Alex Williamson04b16652010-07-02 11:13:17 -06001183static void *file_ram_alloc(RAMBlock *block,
1184 ram_addr_t memory,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001185 const char *path,
1186 Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001187{
1188 char *filename;
Peter Feiner8ca761f2013-03-04 13:54:25 -05001189 char *sanitized_name;
1190 char *c;
Michael S. Tsirkin8561c922015-09-10 16:41:17 +03001191 void *ptr;
Hu Tao557529d2014-09-09 13:28:00 +08001192 void *area = NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001193 int fd;
Hu Tao557529d2014-09-09 13:28:00 +08001194 uint64_t hpagesize;
Michael S. Tsirkin8561c922015-09-10 16:41:17 +03001195 uint64_t total;
Hu Taofc7a5802014-09-09 13:28:01 +08001196 Error *local_err = NULL;
Michael S. Tsirkin8561c922015-09-10 16:41:17 +03001197 size_t offset;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001198
Hu Taofc7a5802014-09-09 13:28:01 +08001199 hpagesize = gethugepagesize(path, &local_err);
1200 if (local_err) {
1201 error_propagate(errp, local_err);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001202 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001203 }
Igor Mammedova2b257d2014-10-31 16:38:37 +00001204 block->mr->align = hpagesize;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001205
1206 if (memory < hpagesize) {
Hu Tao557529d2014-09-09 13:28:00 +08001207 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1208 "or larger than huge page size 0x%" PRIx64,
1209 memory, hpagesize);
1210 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001211 }
1212
1213 if (kvm_enabled() && !kvm_has_sync_mmu()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001214 error_setg(errp,
1215 "host lacks kvm mmu notifiers, -mem-path unsupported");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001216 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001217 }
1218
Peter Feiner8ca761f2013-03-04 13:54:25 -05001219 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
Peter Crosthwaite83234bf2014-08-14 23:54:29 -07001220 sanitized_name = g_strdup(memory_region_name(block->mr));
Peter Feiner8ca761f2013-03-04 13:54:25 -05001221 for (c = sanitized_name; *c != '\0'; c++) {
1222 if (*c == '/')
1223 *c = '_';
1224 }
1225
1226 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1227 sanitized_name);
1228 g_free(sanitized_name);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001229
1230 fd = mkstemp(filename);
1231 if (fd < 0) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001232 error_setg_errno(errp, errno,
1233 "unable to create backing store for hugepages");
Stefan Weile4ada482013-01-16 18:37:23 +01001234 g_free(filename);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001235 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001236 }
1237 unlink(filename);
Stefan Weile4ada482013-01-16 18:37:23 +01001238 g_free(filename);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001239
Chen Hanxiao9284f312015-07-24 11:12:03 +08001240 memory = ROUND_UP(memory, hpagesize);
Michael S. Tsirkin8561c922015-09-10 16:41:17 +03001241 total = memory + hpagesize;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001242
1243 /*
1244 * ftruncate is not supported by hugetlbfs on older
1245 * hosts, so don't bother bailing out on errors.
1246 * If anything goes wrong with it under other filesystems,
1247 * mmap will fail.
1248 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001249 if (ftruncate(fd, memory)) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001250 perror("ftruncate");
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001251 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001252
Michael S. Tsirkin8561c922015-09-10 16:41:17 +03001253 ptr = mmap(0, total, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS,
1254 -1, 0);
1255 if (ptr == MAP_FAILED) {
1256 error_setg_errno(errp, errno,
1257 "unable to allocate memory range for hugepages");
1258 close(fd);
1259 goto error;
1260 }
1261
1262 offset = QEMU_ALIGN_UP((uintptr_t)ptr, hpagesize) - (uintptr_t)ptr;
1263
1264 area = mmap(ptr + offset, memory, PROT_READ | PROT_WRITE,
1265 (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE) |
1266 MAP_FIXED,
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001267 fd, 0);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001268 if (area == MAP_FAILED) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001269 error_setg_errno(errp, errno,
1270 "unable to map backing store for hugepages");
Michael S. Tsirkin8561c922015-09-10 16:41:17 +03001271 munmap(ptr, total);
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001272 close(fd);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001273 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001274 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001275
Michael S. Tsirkin8561c922015-09-10 16:41:17 +03001276 if (offset > 0) {
1277 munmap(ptr, offset);
1278 }
1279 ptr += offset;
1280 total -= offset;
1281
1282 if (total > memory + getpagesize()) {
1283 munmap(ptr + memory + getpagesize(),
1284 total - memory - getpagesize());
1285 }
1286
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001287 if (mem_prealloc) {
Paolo Bonzini38183312014-05-14 17:43:21 +08001288 os_mem_prealloc(fd, area, memory);
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001289 }
1290
Alex Williamson04b16652010-07-02 11:13:17 -06001291 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001292 return area;
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001293
1294error:
1295 if (mem_prealloc) {
Gonglei81b07352015-02-25 12:22:31 +08001296 error_report("%s", error_get_pretty(*errp));
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001297 exit(1);
1298 }
1299 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001300}
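/* The over-allocate-and-trim dance above, with illustrative numbers
 * (2 MiB huge pages, a 4 MiB block; addresses invented):
 *
 *     total  = 4 MiB + 2 MiB                  PROT_NONE reservation
 *     ptr    = 0x7f0000100000                 from the anonymous mmap
 *     offset = 0x7f0000200000 - ptr = 1 MiB   up to the 2 MiB boundary
 *
 * The hugetlbfs file is mapped with MAP_FIXED at ptr + offset, the
 * 1 MiB head is unmapped, and the tail is trimmed down to a single
 * host page past the data.  That page is still PROT_NONE and acts as
 * a guard; reclaim_ramblock() unmaps it together with the block for
 * RAM_EXTRA allocations.
 */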
1301#endif
1302
Mike Day0dc3f442013-09-05 14:41:35 -04001303/* Called with the ramlist lock held. */
Alex Williamsond17b5282010-06-25 11:08:38 -06001304static ram_addr_t find_ram_offset(ram_addr_t size)
1305{
Alex Williamson04b16652010-07-02 11:13:17 -06001306 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001307 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001308
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001309    assert(size != 0); /* it would hand out the same offset multiple times */
1310
Mike Day0dc3f442013-09-05 14:41:35 -04001311 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
Alex Williamson04b16652010-07-02 11:13:17 -06001312 return 0;
Mike Day0d53d9f2015-01-21 13:45:24 +01001313 }
Alex Williamson04b16652010-07-02 11:13:17 -06001314
Mike Day0dc3f442013-09-05 14:41:35 -04001315 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001316 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001317
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001318 end = block->offset + block->max_length;
Alex Williamson04b16652010-07-02 11:13:17 -06001319
Mike Day0dc3f442013-09-05 14:41:35 -04001320 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001321 if (next_block->offset >= end) {
1322 next = MIN(next, next_block->offset);
1323 }
1324 }
1325 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001326 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001327 mingap = next - end;
1328 }
1329 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001330
1331 if (offset == RAM_ADDR_MAX) {
1332 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1333 (uint64_t)size);
1334 abort();
1335 }
1336
Alex Williamson04b16652010-07-02 11:13:17 -06001337 return offset;
1338}
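/* find_ram_offset() is a best-fit search over the (unsorted) block
 * list.  For illustration: with blocks at [0, 2M) and [6M, 8M), a 1M
 * request sees two gaps, [2M, 6M) and [8M, RAM_ADDR_MAX); the 4M gap
 * is the smallest that fits, so the offset 2M is handed out.  Every
 * block end is compared with every block start, hence the nested
 * QLIST_FOREACH_RCU loops.
 */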
1339
Juan Quintela652d7ec2012-07-20 10:37:54 +02001340ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001341{
Alex Williamsond17b5282010-06-25 11:08:38 -06001342 RAMBlock *block;
1343 ram_addr_t last = 0;
1344
Mike Day0dc3f442013-09-05 14:41:35 -04001345 rcu_read_lock();
1346 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001347 last = MAX(last, block->offset + block->max_length);
Mike Day0d53d9f2015-01-21 13:45:24 +01001348 }
Mike Day0dc3f442013-09-05 14:41:35 -04001349 rcu_read_unlock();
Alex Williamsond17b5282010-06-25 11:08:38 -06001350 return last;
1351}
1352
Jason Baronddb97f12012-08-02 15:44:16 -04001353static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1354{
1355 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001356
1357    /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
Marcel Apfelbaum47c8ca52015-02-04 17:43:54 +02001358 if (!machine_dump_guest_core(current_machine)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001359 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1360 if (ret) {
1361 perror("qemu_madvise");
1362 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1363 "but dump_guest_core=off specified\n");
1364 }
1365 }
1366}
1367
Mike Day0dc3f442013-09-05 14:41:35 -04001368/* Called within an RCU critical section, or while the ramlist lock
1369 * is held.
1370 */
Hu Tao20cfe882014-04-02 15:13:26 +08001371static RAMBlock *find_ram_block(ram_addr_t addr)
Cam Macdonell84b89d72010-07-26 18:10:57 -06001372{
Hu Tao20cfe882014-04-02 15:13:26 +08001373 RAMBlock *block;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001374
Mike Day0dc3f442013-09-05 14:41:35 -04001375 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001376 if (block->offset == addr) {
Hu Tao20cfe882014-04-02 15:13:26 +08001377 return block;
Avi Kivityc5705a72011-12-20 15:59:12 +02001378 }
1379 }
Hu Tao20cfe882014-04-02 15:13:26 +08001380
1381 return NULL;
1382}
1383
Mike Dayae3a7042013-09-05 14:41:35 -04001384/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001385void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1386{
Mike Dayae3a7042013-09-05 14:41:35 -04001387 RAMBlock *new_block, *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001388
Mike Day0dc3f442013-09-05 14:41:35 -04001389 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001390 new_block = find_ram_block(addr);
Avi Kivityc5705a72011-12-20 15:59:12 +02001391 assert(new_block);
1392 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001393
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001394 if (dev) {
1395 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001396 if (id) {
1397 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001398 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001399 }
1400 }
1401 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1402
Mike Day0dc3f442013-09-05 14:41:35 -04001403 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001404 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001405 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1406 new_block->idstr);
1407 abort();
1408 }
1409 }
Mike Day0dc3f442013-09-05 14:41:35 -04001410 rcu_read_unlock();
Avi Kivityc5705a72011-12-20 15:59:12 +02001411}
1412
Mike Dayae3a7042013-09-05 14:41:35 -04001413/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001414void qemu_ram_unset_idstr(ram_addr_t addr)
1415{
Mike Dayae3a7042013-09-05 14:41:35 -04001416 RAMBlock *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001417
Mike Dayae3a7042013-09-05 14:41:35 -04001418 /* FIXME: arch_init.c assumes that this is not called throughout
1419 * migration. Ignore the problem since hot-unplug during migration
1420 * does not work anyway.
1421 */
1422
Mike Day0dc3f442013-09-05 14:41:35 -04001423 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001424 block = find_ram_block(addr);
Hu Tao20cfe882014-04-02 15:13:26 +08001425 if (block) {
1426 memset(block->idstr, 0, sizeof(block->idstr));
1427 }
Mike Day0dc3f442013-09-05 14:41:35 -04001428 rcu_read_unlock();
Hu Tao20cfe882014-04-02 15:13:26 +08001429}
1430
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001431static int memory_try_enable_merging(void *addr, size_t len)
1432{
Marcel Apfelbaum75cc7f02015-02-04 17:43:55 +02001433 if (!machine_mem_merge(current_machine)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001434 /* disabled by the user */
1435 return 0;
1436 }
1437
1438 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1439}
1440
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001441/* Only legal before the guest might have detected the memory size: e.g. on
1442 * incoming migration, or right after reset.
1443 *
1444 * As the memory core doesn't know how memory is accessed, it is up to the
1445 * resize callback to update device state and/or add assertions to detect
1446 * misuse, if necessary.
1447 */
1448int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
1449{
1450 RAMBlock *block = find_ram_block(base);
1451
1452 assert(block);
1453
Michael S. Tsirkin129ddaf2015-02-17 10:15:30 +01001454 newsize = TARGET_PAGE_ALIGN(newsize);
1455
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001456 if (block->used_length == newsize) {
1457 return 0;
1458 }
1459
1460 if (!(block->flags & RAM_RESIZEABLE)) {
1461 error_setg_errno(errp, EINVAL,
1462 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1463 " in != 0x" RAM_ADDR_FMT, block->idstr,
1464 newsize, block->used_length);
1465 return -EINVAL;
1466 }
1467
1468 if (block->max_length < newsize) {
1469 error_setg_errno(errp, EINVAL,
1470 "Length too large: %s: 0x" RAM_ADDR_FMT
1471 " > 0x" RAM_ADDR_FMT, block->idstr,
1472 newsize, block->max_length);
1473 return -EINVAL;
1474 }
1475
1476 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1477 block->used_length = newsize;
Paolo Bonzini58d27072015-03-23 11:56:01 +01001478 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1479 DIRTY_CLIENTS_ALL);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001480 memory_region_set_size(block->mr, newsize);
1481 if (block->resized) {
1482 block->resized(block->idstr, newsize, block->host);
1483 }
1484 return 0;
1485}
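/* A minimal caller sketch (hypothetical; block_offset and new_size are
 * invented names, error handling follows this file's idiom):
 *
 *     Error *err = NULL;
 *     if (qemu_ram_resize(block_offset, new_size, &err) < 0) {
 *         error_report("%s", error_get_pretty(err));
 *         error_free(err);
 *     }
 *
 * The new size is page-aligned internally and must not exceed the
 * max_length fixed when the block was allocated.
 */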
1486
Hu Taoef701d72014-09-09 13:27:54 +08001487static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
Avi Kivityc5705a72011-12-20 15:59:12 +02001488{
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001489 RAMBlock *block;
Mike Day0d53d9f2015-01-21 13:45:24 +01001490 RAMBlock *last_block = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001491 ram_addr_t old_ram_size, new_ram_size;
1492
1493 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001494
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001495 qemu_mutex_lock_ramlist();
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001496 new_block->offset = find_ram_offset(new_block->max_length);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001497
1498 if (!new_block->host) {
1499 if (xen_enabled()) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001500 xen_ram_alloc(new_block->offset, new_block->max_length,
1501 new_block->mr);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001502 } else {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001503 new_block->host = phys_mem_alloc(new_block->max_length,
Igor Mammedova2b257d2014-10-31 16:38:37 +00001504 &new_block->mr->align);
Markus Armbruster39228252013-07-31 15:11:11 +02001505 if (!new_block->host) {
Hu Taoef701d72014-09-09 13:27:54 +08001506 error_setg_errno(errp, errno,
1507 "cannot set up guest memory '%s'",
1508 memory_region_name(new_block->mr));
1509 qemu_mutex_unlock_ramlist();
1510 return -1;
Markus Armbruster39228252013-07-31 15:11:11 +02001511 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001512 memory_try_enable_merging(new_block->host, new_block->max_length);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001513 }
1514 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001515
Li Zhijiandd631692015-07-02 20:18:06 +08001516 new_ram_size = MAX(old_ram_size,
1517 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1518 if (new_ram_size > old_ram_size) {
1519 migration_bitmap_extend(old_ram_size, new_ram_size);
1520 }
Mike Day0d53d9f2015-01-21 13:45:24 +01001521 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1522 * QLIST (which has an RCU-friendly variant) does not have insertion at
1523 * tail, so save the last element in last_block.
1524 */
Mike Day0dc3f442013-09-05 14:41:35 -04001525 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Mike Day0d53d9f2015-01-21 13:45:24 +01001526 last_block = block;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001527 if (block->max_length < new_block->max_length) {
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001528 break;
1529 }
1530 }
1531 if (block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001532 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001533 } else if (last_block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001534 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001535 } else { /* list is empty */
Mike Day0dc3f442013-09-05 14:41:35 -04001536 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001537 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001538 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001539
Mike Day0dc3f442013-09-05 14:41:35 -04001540 /* Write list before version */
1541 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001542 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001543 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001544
Juan Quintela2152f5c2013-10-08 13:52:02 +02001545 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1546
1547 if (new_ram_size > old_ram_size) {
Juan Quintela1ab4c8c2013-10-08 16:14:39 +02001548 int i;
Mike Dayae3a7042013-09-05 14:41:35 -04001549
1550 /* ram_list.dirty_memory[] is protected by the iothread lock. */
Juan Quintela1ab4c8c2013-10-08 16:14:39 +02001551 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1552 ram_list.dirty_memory[i] =
1553 bitmap_zero_extend(ram_list.dirty_memory[i],
1554 old_ram_size, new_ram_size);
1555 }
Juan Quintela2152f5c2013-10-08 13:52:02 +02001556 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001557 cpu_physical_memory_set_dirty_range(new_block->offset,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001558 new_block->used_length,
1559 DIRTY_CLIENTS_ALL);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001560
Paolo Bonzinia904c912015-01-21 16:18:35 +01001561 if (new_block->host) {
1562 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1563 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1564 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1565 if (kvm_enabled()) {
1566 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1567 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001568 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001569
1570 return new_block->offset;
1571}
1572
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001573#ifdef __linux__
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001574ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001575 bool share, const char *mem_path,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001576 Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001577{
1578 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001579 ram_addr_t addr;
1580 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001581
1582 if (xen_enabled()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001583 error_setg(errp, "-mem-path not supported with Xen");
1584 return -1;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001585 }
1586
1587 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1588 /*
1589 * file_ram_alloc() needs to allocate just like
1590 * phys_mem_alloc, but we haven't bothered to provide
1591 * a hook there.
1592 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001593 error_setg(errp,
1594 "-mem-path not supported with this accelerator");
1595 return -1;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001596 }
1597
1598 size = TARGET_PAGE_ALIGN(size);
1599 new_block = g_malloc0(sizeof(*new_block));
1600 new_block->mr = mr;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001601 new_block->used_length = size;
1602 new_block->max_length = size;
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001603 new_block->flags = share ? RAM_SHARED : 0;
Michael S. Tsirkin8561c922015-09-10 16:41:17 +03001604 new_block->flags |= RAM_EXTRA;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001605 new_block->host = file_ram_alloc(new_block, size,
1606 mem_path, errp);
1607 if (!new_block->host) {
1608 g_free(new_block);
1609 return -1;
1610 }
1611
Hu Taoef701d72014-09-09 13:27:54 +08001612 addr = ram_block_add(new_block, &local_err);
1613 if (local_err) {
1614 g_free(new_block);
1615 error_propagate(errp, local_err);
1616 return -1;
1617 }
1618 return addr;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001619}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001620#endif
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001621
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001622static
1623ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1624 void (*resized)(const char*,
1625 uint64_t length,
1626 void *host),
1627 void *host, bool resizeable,
Hu Taoef701d72014-09-09 13:27:54 +08001628 MemoryRegion *mr, Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001629{
1630 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001631 ram_addr_t addr;
1632 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001633
1634 size = TARGET_PAGE_ALIGN(size);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001635 max_size = TARGET_PAGE_ALIGN(max_size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001636 new_block = g_malloc0(sizeof(*new_block));
1637 new_block->mr = mr;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001638 new_block->resized = resized;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001639 new_block->used_length = size;
1640 new_block->max_length = max_size;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001641 assert(max_size >= size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001642 new_block->fd = -1;
1643 new_block->host = host;
1644 if (host) {
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001645 new_block->flags |= RAM_PREALLOC;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001646 }
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001647 if (resizeable) {
1648 new_block->flags |= RAM_RESIZEABLE;
1649 }
Hu Taoef701d72014-09-09 13:27:54 +08001650 addr = ram_block_add(new_block, &local_err);
1651 if (local_err) {
1652 g_free(new_block);
1653 error_propagate(errp, local_err);
1654 return -1;
1655 }
1656 return addr;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001657}
1658
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001659ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1660 MemoryRegion *mr, Error **errp)
1661{
1662 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1663}
1664
Hu Taoef701d72014-09-09 13:27:54 +08001665ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
pbrook94a6b542009-04-11 17:15:54 +00001666{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001667 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1668}
1669
1670ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
1671 void (*resized)(const char*,
1672 uint64_t length,
1673 void *host),
1674 MemoryRegion *mr, Error **errp)
1675{
1676 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
pbrook94a6b542009-04-11 17:15:54 +00001677}
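/* All of the public allocators above funnel into
 * qemu_ram_alloc_internal().  Hypothetical uses (names illustrative):
 *
 *     qemu_ram_alloc(size, mr, &err);                      plain RAM
 *     qemu_ram_alloc_from_ptr(size, buf, mr, &err);        caller-owned
 *     qemu_ram_alloc_resizeable(size, max, cb, mr, &err);  grows to max
 *
 * Only the last sets RAM_RESIZEABLE; passing a host pointer sets
 * RAM_PREALLOC, so reclaim_ramblock() will never free that memory.
 */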
bellarde9a1ab12007-02-08 23:08:38 +00001678
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001679void qemu_ram_free_from_ptr(ram_addr_t addr)
1680{
1681 RAMBlock *block;
1682
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001683 qemu_mutex_lock_ramlist();
Mike Day0dc3f442013-09-05 14:41:35 -04001684 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001685 if (addr == block->offset) {
Mike Day0dc3f442013-09-05 14:41:35 -04001686 QLIST_REMOVE_RCU(block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001687 ram_list.mru_block = NULL;
Mike Day0dc3f442013-09-05 14:41:35 -04001688 /* Write list before version */
1689 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001690 ram_list.version++;
Paolo Bonzini43771532013-09-09 17:58:40 +02001691 g_free_rcu(block, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001692 break;
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001693 }
1694 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001695 qemu_mutex_unlock_ramlist();
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001696}
1697
Paolo Bonzini43771532013-09-09 17:58:40 +02001698static void reclaim_ramblock(RAMBlock *block)
1699{
1700 if (block->flags & RAM_PREALLOC) {
1701 ;
1702 } else if (xen_enabled()) {
1703 xen_invalidate_map_cache_entry(block->host);
1704#ifndef _WIN32
1705 } else if (block->fd >= 0) {
Michael S. Tsirkin8561c922015-09-10 16:41:17 +03001706 if (block->flags & RAM_EXTRA) {
1707 munmap(block->host, block->max_length + getpagesize());
1708 } else {
1709 munmap(block->host, block->max_length);
1710 }
Paolo Bonzini43771532013-09-09 17:58:40 +02001711 close(block->fd);
1712#endif
1713 } else {
1714 qemu_anon_ram_free(block->host, block->max_length);
1715 }
1716 g_free(block);
1717}
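/* Layout behind the RAM_EXTRA case above: file_ram_alloc() keeps one
 * host page of its original PROT_NONE reservation as a guard, so the
 * munmap() here must cover it too:
 *
 *     [ hugepage-backed data: max_length ][ guard page ]
 *     ^ block->host                        unmapped together here
 */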
1718
Anthony Liguoric227f092009-10-01 16:12:16 -05001719void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00001720{
Alex Williamson04b16652010-07-02 11:13:17 -06001721 RAMBlock *block;
1722
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001723 qemu_mutex_lock_ramlist();
Mike Day0dc3f442013-09-05 14:41:35 -04001724 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001725 if (addr == block->offset) {
Mike Day0dc3f442013-09-05 14:41:35 -04001726 QLIST_REMOVE_RCU(block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001727 ram_list.mru_block = NULL;
Mike Day0dc3f442013-09-05 14:41:35 -04001728 /* Write list before version */
1729 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001730 ram_list.version++;
Paolo Bonzini43771532013-09-09 17:58:40 +02001731 call_rcu(block, reclaim_ramblock, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001732 break;
Alex Williamson04b16652010-07-02 11:13:17 -06001733 }
1734 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001735 qemu_mutex_unlock_ramlist();
bellarde9a1ab12007-02-08 23:08:38 +00001736}
1737
Huang Yingcd19cfa2011-03-02 08:56:19 +01001738#ifndef _WIN32
1739void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1740{
1741 RAMBlock *block;
1742 ram_addr_t offset;
1743 int flags;
1744 void *area, *vaddr;
1745
Mike Day0dc3f442013-09-05 14:41:35 -04001746 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001747 offset = addr - block->offset;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001748 if (offset < block->max_length) {
Michael S. Tsirkin1240be22014-11-12 11:44:41 +02001749 vaddr = ramblock_ptr(block, offset);
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001750 if (block->flags & RAM_PREALLOC) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001751 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001752 } else if (xen_enabled()) {
1753 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001754 } else {
1755 flags = MAP_FIXED;
Markus Armbruster3435f392013-07-31 15:11:07 +02001756 if (block->fd >= 0) {
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001757 flags |= (block->flags & RAM_SHARED ?
1758 MAP_SHARED : MAP_PRIVATE);
Markus Armbruster3435f392013-07-31 15:11:07 +02001759 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1760 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001761 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001762 /*
1763 * Remap needs to match alloc. Accelerators that
1764 * set phys_mem_alloc never remap. If they did,
1765 * we'd need a remap hook here.
1766 */
1767 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1768
Huang Yingcd19cfa2011-03-02 08:56:19 +01001769 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1770 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1771 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001772 }
1773 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001774 fprintf(stderr, "Could not remap addr: "
1775 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001776 length, addr);
1777 exit(1);
1778 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001779 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001780 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001781 }
Huang Yingcd19cfa2011-03-02 08:56:19 +01001782 }
1783 }
1784}
1785#endif /* !_WIN32 */
1786
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001787int qemu_get_ram_fd(ram_addr_t addr)
1788{
Mike Dayae3a7042013-09-05 14:41:35 -04001789 RAMBlock *block;
1790 int fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001791
Mike Day0dc3f442013-09-05 14:41:35 -04001792 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001793 block = qemu_get_ram_block(addr);
1794 fd = block->fd;
Mike Day0dc3f442013-09-05 14:41:35 -04001795 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001796 return fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001797}
1798
Damjan Marion3fd74b82014-06-26 23:01:32 +02001799void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1800{
Mike Dayae3a7042013-09-05 14:41:35 -04001801 RAMBlock *block;
1802 void *ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001803
Mike Day0dc3f442013-09-05 14:41:35 -04001804 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001805 block = qemu_get_ram_block(addr);
1806 ptr = ramblock_ptr(block, 0);
Mike Day0dc3f442013-09-05 14:41:35 -04001807 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001808 return ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001809}
1810
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001811/* Return a host pointer to ram allocated with qemu_ram_alloc.
Mike Dayae3a7042013-09-05 14:41:35 -04001812 * This should not be used for general purpose DMA. Use address_space_map
1813 * or address_space_rw instead. For local memory (e.g. video ram) that the
1814 * device owns, use memory_region_get_ram_ptr.
Mike Day0dc3f442013-09-05 14:41:35 -04001815 *
1816 * By the time this function returns, the returned pointer is not protected
1817 * by RCU anymore. If the caller is not within an RCU critical section and
1818 * does not hold the iothread lock, it must have other means of protecting the
1819 * pointer, such as a reference to the region that includes the incoming
1820 * ram_addr_t.
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001821 */
1822void *qemu_get_ram_ptr(ram_addr_t addr)
1823{
Mike Dayae3a7042013-09-05 14:41:35 -04001824 RAMBlock *block;
1825 void *ptr;
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001826
Mike Day0dc3f442013-09-05 14:41:35 -04001827 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001828 block = qemu_get_ram_block(addr);
1829
1830 if (xen_enabled() && block->host == NULL) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001831 /* We need to check if the requested address is in the RAM
1832 * because we don't want to map the entire memory in QEMU.
1833 * In that case just map until the end of the page.
1834 */
1835 if (block->offset == 0) {
Mike Dayae3a7042013-09-05 14:41:35 -04001836 ptr = xen_map_cache(addr, 0, 0);
Mike Day0dc3f442013-09-05 14:41:35 -04001837 goto unlock;
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001838 }
Mike Dayae3a7042013-09-05 14:41:35 -04001839
1840 block->host = xen_map_cache(block->offset, block->max_length, 1);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001841 }
Mike Dayae3a7042013-09-05 14:41:35 -04001842 ptr = ramblock_ptr(block, addr - block->offset);
1843
Mike Day0dc3f442013-09-05 14:41:35 -04001844unlock:
1845 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001846 return ptr;
pbrookdc828ca2009-04-09 22:21:07 +00001847}
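/* Sketch of the protection contract described above (illustrative):
 *
 *     rcu_read_lock();
 *     uint8_t *host = qemu_get_ram_ptr(addr);
 *     ... dereference host only inside the critical section ...
 *     rcu_read_unlock();
 *
 * Alternatively, take a reference on the owning region with
 * memory_region_ref() so the RAMBlock cannot be reclaimed while the
 * pointer is live.
 */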
1848
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001849/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
Mike Dayae3a7042013-09-05 14:41:35 -04001850 * but takes a size argument.
Mike Day0dc3f442013-09-05 14:41:35 -04001851 *
1852 * By the time this function returns, the returned pointer is not protected
1853 * by RCU anymore. If the caller is not within an RCU critical section and
1854 * does not hold the iothread lock, it must have other means of protecting the
1855 * pointer, such as a reference to the region that includes the incoming
1856 * ram_addr_t.
Mike Dayae3a7042013-09-05 14:41:35 -04001857 */
Peter Maydellcb85f7a2013-07-08 09:44:04 +01001858static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001859{
Mike Dayae3a7042013-09-05 14:41:35 -04001860 void *ptr;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001861 if (*size == 0) {
1862 return NULL;
1863 }
Jan Kiszka868bb332011-06-21 22:59:09 +02001864 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001865 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02001866 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001867 RAMBlock *block;
Mike Day0dc3f442013-09-05 14:41:35 -04001868 rcu_read_lock();
1869 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001870 if (addr - block->offset < block->max_length) {
1871 if (addr - block->offset + *size > block->max_length)
1872 *size = block->max_length - addr + block->offset;
Mike Dayae3a7042013-09-05 14:41:35 -04001873 ptr = ramblock_ptr(block, addr - block->offset);
Mike Day0dc3f442013-09-05 14:41:35 -04001874 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001875 return ptr;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001876 }
1877 }
1878
1879 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1880 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001881 }
1882}
1883
Paolo Bonzini7443b432013-06-03 12:44:02 +02001884/* Some of the softmmu routines need to translate from a host pointer
Mike Dayae3a7042013-09-05 14:41:35 -04001885 * (typically a TLB entry) back to a ram offset.
1886 *
1887 * By the time this function returns, the returned pointer is not protected
1888 * by RCU anymore. If the caller is not within an RCU critical section and
1889 * does not hold the iothread lock, it must have other means of protecting the
1890 * pointer, such as a reference to the region that includes the incoming
1891 * ram_addr_t.
1892 */
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001893MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00001894{
pbrook94a6b542009-04-11 17:15:54 +00001895 RAMBlock *block;
1896 uint8_t *host = ptr;
Mike Dayae3a7042013-09-05 14:41:35 -04001897 MemoryRegion *mr;
pbrook94a6b542009-04-11 17:15:54 +00001898
Jan Kiszka868bb332011-06-21 22:59:09 +02001899 if (xen_enabled()) {
Mike Day0dc3f442013-09-05 14:41:35 -04001900 rcu_read_lock();
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001901 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Mike Dayae3a7042013-09-05 14:41:35 -04001902 mr = qemu_get_ram_block(*ram_addr)->mr;
Mike Day0dc3f442013-09-05 14:41:35 -04001903 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001904 return mr;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001905 }
1906
Mike Day0dc3f442013-09-05 14:41:35 -04001907 rcu_read_lock();
1908 block = atomic_rcu_read(&ram_list.mru_block);
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001909 if (block && block->host && host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001910 goto found;
1911 }
1912
Mike Day0dc3f442013-09-05 14:41:35 -04001913 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001914        /* This case can happen when the block is not mapped. */
1915 if (block->host == NULL) {
1916 continue;
1917 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001918 if (host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001919 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001920 }
pbrook94a6b542009-04-11 17:15:54 +00001921 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001922
Mike Day0dc3f442013-09-05 14:41:35 -04001923 rcu_read_unlock();
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001924 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001925
1926found:
1927 *ram_addr = block->offset + (host - block->host);
Mike Dayae3a7042013-09-05 14:41:35 -04001928 mr = block->mr;
Mike Day0dc3f442013-09-05 14:41:35 -04001929 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001930 return mr;
Marcelo Tosattie8902612010-10-11 15:31:19 -03001931}
Alex Williamsonf471a172010-06-11 11:11:42 -06001932
Avi Kivitya8170e52012-10-23 12:30:10 +02001933static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001934 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001935{
Juan Quintela52159192013-10-08 12:44:04 +02001936 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001937 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00001938 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001939 switch (size) {
1940 case 1:
1941 stb_p(qemu_get_ram_ptr(ram_addr), val);
1942 break;
1943 case 2:
1944 stw_p(qemu_get_ram_ptr(ram_addr), val);
1945 break;
1946 case 4:
1947 stl_p(qemu_get_ram_ptr(ram_addr), val);
1948 break;
1949 default:
1950 abort();
1951 }
Paolo Bonzini58d27072015-03-23 11:56:01 +01001952 /* Set both VGA and migration bits for simplicity and to remove
1953 * the notdirty callback faster.
1954 */
1955 cpu_physical_memory_set_dirty_range(ram_addr, size,
1956 DIRTY_CLIENTS_NOCODE);
bellardf23db162005-08-21 19:12:28 +00001957 /* we remove the notdirty callback only if the code has been
1958 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02001959 if (!cpu_physical_memory_is_clean(ram_addr)) {
Peter Crosthwaitebcae01e2015-09-10 22:39:42 -07001960 tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
Andreas Färber4917cf42013-05-27 05:17:50 +02001961 }
bellard1ccde1c2004-02-06 19:46:14 +00001962}
1963
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001964static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1965 unsigned size, bool is_write)
1966{
1967 return is_write;
1968}
1969
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001970static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001971 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001972 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001973 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00001974};
1975
pbrook0f459d12008-06-09 00:20:13 +00001976/* Generate a debug exception if a watchpoint has been hit. */
Peter Maydell66b9b432015-04-26 16:49:24 +01001977static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
pbrook0f459d12008-06-09 00:20:13 +00001978{
Andreas Färber93afead2013-08-26 03:41:01 +02001979 CPUState *cpu = current_cpu;
1980 CPUArchState *env = cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00001981 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00001982 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00001983 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00001984 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00001985
Andreas Färberff4700b2013-08-26 18:23:18 +02001986 if (cpu->watchpoint_hit) {
aliguori06d55cc2008-11-18 20:24:06 +00001987 /* We re-entered the check after replacing the TB. Now raise
1988         * the debug interrupt so that it will trigger after the
1989 * current instruction. */
Andreas Färber93afead2013-08-26 03:41:01 +02001990 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00001991 return;
1992 }
Andreas Färber93afead2013-08-26 03:41:01 +02001993 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Andreas Färberff4700b2013-08-26 18:23:18 +02001994 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Peter Maydell05068c02014-09-12 14:06:48 +01001995 if (cpu_watchpoint_address_matches(wp, vaddr, len)
1996 && (wp->flags & flags)) {
Peter Maydell08225672014-09-12 14:06:48 +01001997 if (flags == BP_MEM_READ) {
1998 wp->flags |= BP_WATCHPOINT_HIT_READ;
1999 } else {
2000 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
2001 }
2002 wp->hitaddr = vaddr;
Peter Maydell66b9b432015-04-26 16:49:24 +01002003 wp->hitattrs = attrs;
Andreas Färberff4700b2013-08-26 18:23:18 +02002004 if (!cpu->watchpoint_hit) {
2005 cpu->watchpoint_hit = wp;
Andreas Färber239c51a2013-09-01 17:12:23 +02002006 tb_check_watchpoint(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002007 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
Andreas Färber27103422013-08-26 08:31:06 +02002008 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +02002009 cpu_loop_exit(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002010 } else {
2011 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
Andreas Färber648f0342013-09-01 17:43:17 +02002012 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
Andreas Färber0ea8cb82013-09-03 02:12:23 +02002013 cpu_resume_from_signal(cpu, NULL);
aliguori6e140f22008-11-18 20:37:55 +00002014 }
aliguori06d55cc2008-11-18 20:24:06 +00002015 }
aliguori6e140f22008-11-18 20:37:55 +00002016 } else {
2017 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00002018 }
2019 }
2020}
2021
pbrook6658ffb2007-03-16 23:58:11 +00002022/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2023   so these check for a hit and then pass through to the normal out-of-line
2024 phys routines. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002025static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
2026 unsigned size, MemTxAttrs attrs)
pbrook6658ffb2007-03-16 23:58:11 +00002027{
Peter Maydell66b9b432015-04-26 16:49:24 +01002028 MemTxResult res;
2029 uint64_t data;
pbrook6658ffb2007-03-16 23:58:11 +00002030
Peter Maydell66b9b432015-04-26 16:49:24 +01002031 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
Avi Kivity1ec9b902012-01-02 12:47:48 +02002032 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04002033 case 1:
Peter Maydell66b9b432015-04-26 16:49:24 +01002034 data = address_space_ldub(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002035 break;
2036 case 2:
Peter Maydell66b9b432015-04-26 16:49:24 +01002037 data = address_space_lduw(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002038 break;
2039 case 4:
Peter Maydell66b9b432015-04-26 16:49:24 +01002040 data = address_space_ldl(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002041 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02002042 default: abort();
2043 }
Peter Maydell66b9b432015-04-26 16:49:24 +01002044 *pdata = data;
2045 return res;
2046}
2047
2048static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
2049 uint64_t val, unsigned size,
2050 MemTxAttrs attrs)
2051{
2052 MemTxResult res;
2053
2054 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
2055 switch (size) {
2056 case 1:
2057 address_space_stb(&address_space_memory, addr, val, attrs, &res);
2058 break;
2059 case 2:
2060 address_space_stw(&address_space_memory, addr, val, attrs, &res);
2061 break;
2062 case 4:
2063 address_space_stl(&address_space_memory, addr, val, attrs, &res);
2064 break;
2065 default: abort();
2066 }
2067 return res;
pbrook6658ffb2007-03-16 23:58:11 +00002068}
2069
Avi Kivity1ec9b902012-01-02 12:47:48 +02002070static const MemoryRegionOps watch_mem_ops = {
Peter Maydell66b9b432015-04-26 16:49:24 +01002071 .read_with_attrs = watch_mem_read,
2072 .write_with_attrs = watch_mem_write,
Avi Kivity1ec9b902012-01-02 12:47:48 +02002073 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00002074};
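/* Flow sketch for a watched page (the iotlb side of this lives in
 * memory_region_section_get_iotlb()): the TLB entry is pointed at
 * io_mem_watch (PHYS_SECTION_WATCH), so a guest load becomes
 *
 *     watch_mem_read()
 *       -> check_watchpoint(..., BP_MEM_READ)   may raise EXCP_DEBUG
 *       -> address_space_ldub/lduw/ldl(...)     performs the access
 *
 * and still completes normally when no watchpoint matches.
 */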
pbrook6658ffb2007-03-16 23:58:11 +00002075
Peter Maydellf25a49e2015-04-26 16:49:24 +01002076static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2077 unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002078{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002079 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002080 uint8_t buf[8];
Peter Maydell5c9eb022015-04-26 16:49:24 +01002081 MemTxResult res;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002082
blueswir1db7b5422007-05-26 17:36:03 +00002083#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002084 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002085 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00002086#endif
Peter Maydell5c9eb022015-04-26 16:49:24 +01002087 res = address_space_read(subpage->as, addr + subpage->base,
2088 attrs, buf, len);
2089 if (res) {
2090 return res;
Peter Maydellf25a49e2015-04-26 16:49:24 +01002091 }
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002092 switch (len) {
2093 case 1:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002094 *data = ldub_p(buf);
2095 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002096 case 2:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002097 *data = lduw_p(buf);
2098 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002099 case 4:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002100 *data = ldl_p(buf);
2101 return MEMTX_OK;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002102 case 8:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002103 *data = ldq_p(buf);
2104 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002105 default:
2106 abort();
2107 }
blueswir1db7b5422007-05-26 17:36:03 +00002108}
2109
Peter Maydellf25a49e2015-04-26 16:49:24 +01002110static MemTxResult subpage_write(void *opaque, hwaddr addr,
2111 uint64_t value, unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002112{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002113 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002114 uint8_t buf[8];
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002115
blueswir1db7b5422007-05-26 17:36:03 +00002116#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002117 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002118 " value %"PRIx64"\n",
2119 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00002120#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002121 switch (len) {
2122 case 1:
2123 stb_p(buf, value);
2124 break;
2125 case 2:
2126 stw_p(buf, value);
2127 break;
2128 case 4:
2129 stl_p(buf, value);
2130 break;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002131 case 8:
2132 stq_p(buf, value);
2133 break;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002134 default:
2135 abort();
2136 }
Peter Maydell5c9eb022015-04-26 16:49:24 +01002137 return address_space_write(subpage->as, addr + subpage->base,
2138 attrs, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00002139}
2140
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002141static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08002142 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002143{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002144 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002145#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002146 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002147 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002148#endif
2149
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002150 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08002151 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002152}
2153
Avi Kivity70c68e42012-01-02 12:32:48 +02002154static const MemoryRegionOps subpage_ops = {
Peter Maydellf25a49e2015-04-26 16:49:24 +01002155 .read_with_attrs = subpage_read,
2156 .write_with_attrs = subpage_write,
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002157 .impl.min_access_size = 1,
2158 .impl.max_access_size = 8,
2159 .valid.min_access_size = 1,
2160 .valid.max_access_size = 8,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002161 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02002162 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00002163};
2164
Anthony Liguoric227f092009-10-01 16:12:16 -05002165static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002166 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00002167{
2168 int idx, eidx;
2169
2170 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2171 return -1;
2172 idx = SUBPAGE_IDX(start);
2173 eidx = SUBPAGE_IDX(end);
2174#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002175 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2176 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00002177#endif
blueswir1db7b5422007-05-26 17:36:03 +00002178 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02002179 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00002180 }
2181
2182 return 0;
2183}
2184
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002185static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00002186{
Anthony Liguoric227f092009-10-01 16:12:16 -05002187 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00002188
Anthony Liguori7267c092011-08-20 22:09:37 -05002189 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00002190
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002191 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00002192 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002193 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002194 NULL, TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02002195 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00002196#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002197 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2198 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00002199#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02002200 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00002201
2202 return mmio;
2203}
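/* Example of the subpage machinery, assuming 4 KiB pages: if device A
 * backs [0x000, 0x7ff] of a page and device B backs [0x800, 0xfff],
 * mem_add() allocates one subpage_t for the page and registers both:
 *
 *     subpage_register(mmio, 0x000, 0x7ff, section_a);
 *     subpage_register(mmio, 0x800, 0xfff, section_b);
 *
 * Accesses then bounce through subpage_read()/subpage_write(), which
 * re-enter the address space at subpage->base + addr and resolve to
 * the registered sub-section.
 */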
2204
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002205static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2206 MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02002207{
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002208 assert(as);
Avi Kivity5312bd82012-02-12 18:32:55 +02002209 MemoryRegionSection section = {
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002210 .address_space = as,
Avi Kivity5312bd82012-02-12 18:32:55 +02002211 .mr = mr,
2212 .offset_within_address_space = 0,
2213 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002214 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02002215 };
2216
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002217 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02002218}
2219
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002220MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02002221{
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002222 AddressSpaceDispatch *d = atomic_rcu_read(&cpu->memory_dispatch);
2223 MemoryRegionSection *sections = d->map.sections;
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002224
2225 return sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02002226}
2227
Avi Kivitye9179ce2009-06-14 11:38:52 +03002228static void io_mem_init(void)
2229{
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002230 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002231 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002232 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002233 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002234 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002235 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002236 NULL, UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03002237}
2238
Avi Kivityac1970f2012-10-03 16:22:53 +02002239static void mem_begin(MemoryListener *listener)
2240{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002241 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002242 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2243 uint16_t n;
2244
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002245 n = dummy_section(&d->map, as, &io_mem_unassigned);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002246 assert(n == PHYS_SECTION_UNASSIGNED);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002247 n = dummy_section(&d->map, as, &io_mem_notdirty);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002248 assert(n == PHYS_SECTION_NOTDIRTY);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002249 n = dummy_section(&d->map, as, &io_mem_rom);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002250 assert(n == PHYS_SECTION_ROM);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002251 n = dummy_section(&d->map, as, &io_mem_watch);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002252 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02002253
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02002254 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02002255 d->as = as;
2256 as->next_dispatch = d;
2257}
2258
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002259static void address_space_dispatch_free(AddressSpaceDispatch *d)
2260{
2261 phys_sections_free(&d->map);
2262 g_free(d);
2263}
2264
Paolo Bonzini00752702013-05-29 12:13:54 +02002265static void mem_commit(MemoryListener *listener)
2266{
2267 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02002268 AddressSpaceDispatch *cur = as->dispatch;
2269 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02002270
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002271 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02002272
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002273 atomic_rcu_set(&as->dispatch, next);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002274 if (cur) {
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002275 call_rcu(cur, address_space_dispatch_free, rcu);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002276 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02002277}
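/*
 * A sketch of the RCU handoff used above (it restates the code in this
 * file rather than adding new behaviour): readers dereference the
 * dispatch table inside an RCU critical section,
 *
 *     rcu_read_lock();
 *     AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
 *     ...walk d->phys_map and d->map.sections...
 *     rcu_read_unlock();
 *
 * while mem_commit() publishes a fully-built replacement with
 * atomic_rcu_set() and lets call_rcu() free the old table only after
 * all such readers have finished.
 */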
2278
Avi Kivity1d711482012-10-02 18:54:45 +02002279static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02002280{
Andreas Färber182735e2013-05-29 22:29:20 +02002281 CPUState *cpu;
Avi Kivity117712c2012-02-12 21:23:17 +02002282
2283 /* since each CPU stores ram addresses in its TLB cache, we must
2284 reset the modified entries */
2285    /* XXX: slow! */
Andreas Färberbdc44642013-06-24 23:50:24 +02002286 CPU_FOREACH(cpu) {
Edgar E. Iglesias33bde2e2013-11-21 19:06:30 +01002287        /* FIXME: Disentangle the cpu.h circular file deps so we can
2288           directly get the right CPU from the listener. */
2289 if (cpu->tcg_as_listener != listener) {
2290 continue;
2291 }
Paolo Bonzini76e5c762015-01-15 12:46:47 +01002292 cpu_reload_memory_map(cpu);
Avi Kivity117712c2012-02-12 21:23:17 +02002293 }
Avi Kivity50c1e142012-02-08 21:36:02 +02002294}
2295
Avi Kivityac1970f2012-10-03 16:22:53 +02002296void address_space_init_dispatch(AddressSpace *as)
2297{
Paolo Bonzini00752702013-05-29 12:13:54 +02002298 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002299 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002300 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02002301 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02002302 .region_add = mem_add,
2303 .region_nop = mem_add,
2304 .priority = 0,
2305 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002306 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02002307}
2308
Paolo Bonzini6e48e8f2015-02-10 10:25:44 -07002309void address_space_unregister(AddressSpace *as)
2310{
2311 memory_listener_unregister(&as->dispatch_listener);
2312}
2313
Avi Kivity83f3c252012-10-07 12:59:55 +02002314void address_space_destroy_dispatch(AddressSpace *as)
2315{
2316 AddressSpaceDispatch *d = as->dispatch;
2317
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002318 atomic_rcu_set(&as->dispatch, NULL);
2319 if (d) {
2320 call_rcu(d, address_space_dispatch_free, rcu);
2321 }
Avi Kivity83f3c252012-10-07 12:59:55 +02002322}
2323
Avi Kivity62152b82011-07-26 14:26:14 +03002324static void memory_map_init(void)
2325{
Anthony Liguori7267c092011-08-20 22:09:37 -05002326 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01002327
Paolo Bonzini57271d62013-11-07 17:14:37 +01002328 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002329 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03002330
Anthony Liguori7267c092011-08-20 22:09:37 -05002331 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02002332 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2333 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002334 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity62152b82011-07-26 14:26:14 +03002335}
2336
2337MemoryRegion *get_system_memory(void)
2338{
2339 return system_memory;
2340}
2341
Avi Kivity309cb472011-08-08 16:09:03 +03002342MemoryRegion *get_system_io(void)
2343{
2344 return system_io;
2345}
2346
pbrooke2eef172008-06-08 01:09:01 +00002347#endif /* !defined(CONFIG_USER_ONLY) */
2348
bellard13eb76e2004-01-24 15:23:36 +00002349/* physical memory access (slow version, mainly for debug) */
2350#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02002351int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00002352 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002353{
2354 int l, flags;
2355 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00002356 void * p;
bellard13eb76e2004-01-24 15:23:36 +00002357
2358 while (len > 0) {
2359 page = addr & TARGET_PAGE_MASK;
2360 l = (page + TARGET_PAGE_SIZE) - addr;
2361 if (l > len)
2362 l = len;
2363 flags = page_get_flags(page);
2364 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00002365 return -1;
bellard13eb76e2004-01-24 15:23:36 +00002366 if (is_write) {
2367 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00002368 return -1;
bellard579a97f2007-11-11 14:26:47 +00002369 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002370 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00002371 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002372 memcpy(p, buf, l);
2373 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00002374 } else {
2375 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00002376 return -1;
bellard579a97f2007-11-11 14:26:47 +00002377 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002378 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00002379 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002380 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00002381 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00002382 }
2383 len -= l;
2384 buf += l;
2385 addr += l;
2386 }
Paul Brooka68fe892010-03-01 00:08:59 +00002387 return 0;
bellard13eb76e2004-01-24 15:23:36 +00002388}
bellard8df1cd02005-01-28 22:37:22 +00002389
bellard13eb76e2004-01-24 15:23:36 +00002390#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002391
Paolo Bonzini845b6212015-03-23 11:45:53 +01002392static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002393 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002394{
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002395 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2396 /* No early return if dirty_log_mask is or becomes 0, because
2397 * cpu_physical_memory_set_dirty_range will still call
2398 * xen_modified_memory.
2399 */
2400 if (dirty_log_mask) {
2401 dirty_log_mask =
2402 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002403 }
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002404 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2405 tb_invalidate_phys_range(addr, addr + length);
2406 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2407 }
2408 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002409}
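/*
 * Example with assumed values: after a DMA write into a page that TCG
 * has translated code from, dirty_log_mask still includes
 * DIRTY_MEMORY_CODE, so tb_invalidate_phys_range() discards the stale
 * TBs first; the remaining bits (DIRTY_MEMORY_VGA and
 * DIRTY_MEMORY_MIGRATION) are then recorded in the dirty bitmaps for
 * the display and migration code to consume.
 */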
2410
Richard Henderson23326162013-07-08 14:55:59 -07002411static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02002412{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02002413 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07002414
2415 /* Regions are assumed to support 1-4 byte accesses unless
2416 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07002417 if (access_size_max == 0) {
2418 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002419 }
Richard Henderson23326162013-07-08 14:55:59 -07002420
2421 /* Bound the maximum access by the alignment of the address. */
2422 if (!mr->ops->impl.unaligned) {
2423 unsigned align_size_max = addr & -addr;
2424 if (align_size_max != 0 && align_size_max < access_size_max) {
2425 access_size_max = align_size_max;
2426 }
2427 }
2428
2429 /* Don't attempt accesses larger than the maximum. */
2430 if (l > access_size_max) {
2431 l = access_size_max;
2432 }
Peter Maydell6554f5c2015-07-24 13:33:10 +01002433 l = pow2floor(l);
Richard Henderson23326162013-07-08 14:55:59 -07002434
2435 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002436}
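/*
 * Worked example (invented numbers, assuming mr->ops->impl.unaligned
 * is false): for a request of l == 8 at addr == 0x1006 against a
 * region with valid.max_access_size == 4, the cap first drops l to 4,
 * the alignment term (addr & -addr) == 2 drops it to 2, and
 * pow2floor() keeps it at 2, so the caller ends up issuing 2-byte
 * MMIO transactions.
 */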
2437
Jan Kiszka4840f102015-06-18 18:47:22 +02002438static bool prepare_mmio_access(MemoryRegion *mr)
Paolo Bonzini125b3802015-06-18 18:47:21 +02002439{
Jan Kiszka4840f102015-06-18 18:47:22 +02002440 bool unlocked = !qemu_mutex_iothread_locked();
2441 bool release_lock = false;
2442
2443 if (unlocked && mr->global_locking) {
2444 qemu_mutex_lock_iothread();
2445 unlocked = false;
2446 release_lock = true;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002447 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002448 if (mr->flush_coalesced_mmio) {
2449 if (unlocked) {
2450 qemu_mutex_lock_iothread();
2451 }
2452 qemu_flush_coalesced_mmio_buffer();
2453 if (unlocked) {
2454 qemu_mutex_unlock_iothread();
2455 }
2456 }
2457
2458 return release_lock;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002459}
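/*
 * Caller pattern, mirroring the loops below: take the BQL only for
 * devices that still rely on it, and release it once per iteration
 * instead of once per access.
 *
 *     bool release_lock = false;
 *     ...
 *     release_lock |= prepare_mmio_access(mr);
 *     memory_region_dispatch_write(mr, addr1, val, 4, attrs);
 *     ...
 *     if (release_lock) {
 *         qemu_mutex_unlock_iothread();
 *         release_lock = false;
 *     }
 */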
2460
Peter Maydell5c9eb022015-04-26 16:49:24 +01002461MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2462 uint8_t *buf, int len, bool is_write)
bellard13eb76e2004-01-24 15:23:36 +00002463{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002464 hwaddr l;
bellard13eb76e2004-01-24 15:23:36 +00002465 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002466 uint64_t val;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002467 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002468 MemoryRegion *mr;
Peter Maydell3b643492015-04-26 16:49:23 +01002469 MemTxResult result = MEMTX_OK;
Jan Kiszka4840f102015-06-18 18:47:22 +02002470 bool release_lock = false;
ths3b46e622007-09-17 08:09:54 +00002471
Paolo Bonzini41063e12015-03-18 14:21:43 +01002472 rcu_read_lock();
bellard13eb76e2004-01-24 15:23:36 +00002473 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002474 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002475 mr = address_space_translate(as, addr, &addr1, &l, is_write);
ths3b46e622007-09-17 08:09:54 +00002476
bellard13eb76e2004-01-24 15:23:36 +00002477 if (is_write) {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002478 if (!memory_access_is_direct(mr, is_write)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02002479 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002480 l = memory_access_size(mr, l, addr1);
Andreas Färber4917cf42013-05-27 05:17:50 +02002481 /* XXX: could force current_cpu to NULL to avoid
bellard6a00d602005-11-21 23:25:50 +00002482 potential bugs */
Richard Henderson23326162013-07-08 14:55:59 -07002483 switch (l) {
2484 case 8:
2485 /* 64 bit write access */
2486 val = ldq_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002487 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2488 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002489 break;
2490 case 4:
bellard1c213d12005-09-03 10:49:04 +00002491 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002492 val = ldl_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002493 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2494 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002495 break;
2496 case 2:
bellard1c213d12005-09-03 10:49:04 +00002497 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002498 val = lduw_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002499 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2500 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002501 break;
2502 case 1:
bellard1c213d12005-09-03 10:49:04 +00002503 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002504 val = ldub_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002505 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2506 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002507 break;
2508 default:
2509 abort();
bellard13eb76e2004-01-24 15:23:36 +00002510 }
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02002511 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002512 addr1 += memory_region_get_ram_addr(mr);
bellard13eb76e2004-01-24 15:23:36 +00002513 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002514 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00002515 memcpy(ptr, buf, l);
Paolo Bonzini845b6212015-03-23 11:45:53 +01002516 invalidate_and_set_dirty(mr, addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002517 }
2518 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002519 if (!memory_access_is_direct(mr, is_write)) {
bellard13eb76e2004-01-24 15:23:36 +00002520 /* I/O case */
Jan Kiszka4840f102015-06-18 18:47:22 +02002521 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002522 l = memory_access_size(mr, l, addr1);
Richard Henderson23326162013-07-08 14:55:59 -07002523 switch (l) {
2524 case 8:
2525 /* 64 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002526 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2527 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002528 stq_p(buf, val);
2529 break;
2530 case 4:
bellard13eb76e2004-01-24 15:23:36 +00002531 /* 32 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002532 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2533 attrs);
bellardc27004e2005-01-03 23:35:10 +00002534 stl_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002535 break;
2536 case 2:
bellard13eb76e2004-01-24 15:23:36 +00002537 /* 16 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002538 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2539 attrs);
bellardc27004e2005-01-03 23:35:10 +00002540 stw_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002541 break;
2542 case 1:
bellard1c213d12005-09-03 10:49:04 +00002543 /* 8 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002544 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2545 attrs);
bellardc27004e2005-01-03 23:35:10 +00002546 stb_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002547 break;
2548 default:
2549 abort();
bellard13eb76e2004-01-24 15:23:36 +00002550 }
2551 } else {
2552 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002553 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
Avi Kivityf3705d52012-03-08 16:16:34 +02002554 memcpy(buf, ptr, l);
bellard13eb76e2004-01-24 15:23:36 +00002555 }
2556 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002557
2558 if (release_lock) {
2559 qemu_mutex_unlock_iothread();
2560 release_lock = false;
2561 }
2562
bellard13eb76e2004-01-24 15:23:36 +00002563 len -= l;
2564 buf += l;
2565 addr += l;
2566 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002567 rcu_read_unlock();
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002568
Peter Maydell3b643492015-04-26 16:49:23 +01002569 return result;
bellard13eb76e2004-01-24 15:23:36 +00002570}
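/*
 * Usage sketch for a hypothetical DMA-capable device model (the
 * address 0x40000 and the buffer size are invented):
 *
 *     uint8_t data[64];
 *     MemTxResult res = address_space_rw(&address_space_memory, 0x40000,
 *                                        MEMTXATTRS_UNSPECIFIED, data,
 *                                        sizeof(data), false);
 *     if (res != MEMTX_OK) {
 *         ...report a bus error to the guest...
 *     }
 */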
bellard8df1cd02005-01-28 22:37:22 +00002571
Peter Maydell5c9eb022015-04-26 16:49:24 +01002572MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2573 const uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002574{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002575 return address_space_rw(as, addr, attrs, (uint8_t *)buf, len, true);
Avi Kivityac1970f2012-10-03 16:22:53 +02002576}
2577
Peter Maydell5c9eb022015-04-26 16:49:24 +01002578MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2579 uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002580{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002581 return address_space_rw(as, addr, attrs, buf, len, false);
Avi Kivityac1970f2012-10-03 16:22:53 +02002582}
2583
2584
Avi Kivitya8170e52012-10-23 12:30:10 +02002585void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002586 int len, int is_write)
2587{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002588 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2589 buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002590}
2591
Alexander Graf582b55a2013-12-11 14:17:44 +01002592enum write_rom_type {
2593 WRITE_DATA,
2594 FLUSH_CACHE,
2595};
2596
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002597static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
Alexander Graf582b55a2013-12-11 14:17:44 +01002598 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
bellardd0ecd2a2006-04-23 17:14:48 +00002599{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002600 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002601 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002602 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002603 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002604
Paolo Bonzini41063e12015-03-18 14:21:43 +01002605 rcu_read_lock();
bellardd0ecd2a2006-04-23 17:14:48 +00002606 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002607 l = len;
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002608 mr = address_space_translate(as, addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002609
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002610 if (!(memory_region_is_ram(mr) ||
2611 memory_region_is_romd(mr))) {
Paolo Bonzinib242e0e2015-07-04 00:24:51 +02002612 l = memory_access_size(mr, l, addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002613 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002614 addr1 += memory_region_get_ram_addr(mr);
bellardd0ecd2a2006-04-23 17:14:48 +00002615 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002616 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf582b55a2013-12-11 14:17:44 +01002617 switch (type) {
2618 case WRITE_DATA:
2619 memcpy(ptr, buf, l);
Paolo Bonzini845b6212015-03-23 11:45:53 +01002620 invalidate_and_set_dirty(mr, addr1, l);
Alexander Graf582b55a2013-12-11 14:17:44 +01002621 break;
2622 case FLUSH_CACHE:
2623 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2624 break;
2625 }
bellardd0ecd2a2006-04-23 17:14:48 +00002626 }
2627 len -= l;
2628 buf += l;
2629 addr += l;
2630 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002631 rcu_read_unlock();
bellardd0ecd2a2006-04-23 17:14:48 +00002632}
2633
Alexander Graf582b55a2013-12-11 14:17:44 +01002634/* used for ROM loading: can write in RAM and ROM */
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002635void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
Alexander Graf582b55a2013-12-11 14:17:44 +01002636 const uint8_t *buf, int len)
2637{
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002638 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
Alexander Graf582b55a2013-12-11 14:17:44 +01002639}
2640
2641void cpu_flush_icache_range(hwaddr start, int len)
2642{
2643 /*
2644 * This function should do the same thing as an icache flush that was
2645 * triggered from within the guest. For TCG we are always cache coherent,
2646 * so there is no need to flush anything. For KVM / Xen we need to flush
2647 * the host's instruction cache at least.
2648 */
2649 if (tcg_enabled()) {
2650 return;
2651 }
2652
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002653 cpu_physical_memory_write_rom_internal(&address_space_memory,
2654 start, NULL, len, FLUSH_CACHE);
Alexander Graf582b55a2013-12-11 14:17:44 +01002655}
2656
aliguori6d16c2f2009-01-22 16:59:11 +00002657typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002658 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002659 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002660 hwaddr addr;
2661 hwaddr len;
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002662 bool in_use;
aliguori6d16c2f2009-01-22 16:59:11 +00002663} BounceBuffer;
2664
2665static BounceBuffer bounce;
2666
aliguoriba223c22009-01-22 16:59:16 +00002667typedef struct MapClient {
Fam Zhenge95205e2015-03-16 17:03:37 +08002668 QEMUBH *bh;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002669 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002670} MapClient;
2671
Fam Zheng38e047b2015-03-16 17:03:35 +08002672static QemuMutex map_client_list_lock;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002673static QLIST_HEAD(map_client_list, MapClient) map_client_list
2674 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002675
Fam Zhenge95205e2015-03-16 17:03:37 +08002676static void cpu_unregister_map_client_do(MapClient *client)
aliguoriba223c22009-01-22 16:59:16 +00002677{
Blue Swirl72cf2d42009-09-12 07:36:22 +00002678 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002679 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002680}
2681
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002682static void cpu_notify_map_clients_locked(void)
aliguoriba223c22009-01-22 16:59:16 +00002683{
2684 MapClient *client;
2685
Blue Swirl72cf2d42009-09-12 07:36:22 +00002686 while (!QLIST_EMPTY(&map_client_list)) {
2687 client = QLIST_FIRST(&map_client_list);
Fam Zhenge95205e2015-03-16 17:03:37 +08002688 qemu_bh_schedule(client->bh);
2689 cpu_unregister_map_client_do(client);
aliguoriba223c22009-01-22 16:59:16 +00002690 }
2691}
2692
Fam Zhenge95205e2015-03-16 17:03:37 +08002693void cpu_register_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002694{
2695 MapClient *client = g_malloc(sizeof(*client));
2696
Fam Zheng38e047b2015-03-16 17:03:35 +08002697 qemu_mutex_lock(&map_client_list_lock);
Fam Zhenge95205e2015-03-16 17:03:37 +08002698 client->bh = bh;
bellardd0ecd2a2006-04-23 17:14:48 +00002699 QLIST_INSERT_HEAD(&map_client_list, client, link);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002700 if (!atomic_read(&bounce.in_use)) {
2701 cpu_notify_map_clients_locked();
2702 }
Fam Zheng38e047b2015-03-16 17:03:35 +08002703 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002704}
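/*
 * Intended use, as a sketch (retry_map and its opaque pointer are
 * hypothetical names): when address_space_map() returns NULL because
 * the single bounce buffer is busy, queue a bottom half and retry once
 * it fires.
 *
 *     static void retry_map(void *opaque)
 *     {
 *         ...call address_space_map() again...
 *     }
 *
 *     QEMUBH *bh = qemu_bh_new(retry_map, opaque);
 *     cpu_register_map_client(bh);
 *
 * Registration schedules the bh immediately if the bounce buffer is
 * already free, and a scheduled client is unregistered automatically.
 */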
2705
Fam Zheng38e047b2015-03-16 17:03:35 +08002706void cpu_exec_init_all(void)
2707{
2708 qemu_mutex_init(&ram_list.mutex);
2709 memory_map_init();
2710 io_mem_init();
2711 qemu_mutex_init(&map_client_list_lock);
2712}
2713
Fam Zhenge95205e2015-03-16 17:03:37 +08002714void cpu_unregister_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002715{
Fam Zhenge95205e2015-03-16 17:03:37 +08002716 MapClient *client;
bellardd0ecd2a2006-04-23 17:14:48 +00002717
Fam Zhenge95205e2015-03-16 17:03:37 +08002718 qemu_mutex_lock(&map_client_list_lock);
2719 QLIST_FOREACH(client, &map_client_list, link) {
2720 if (client->bh == bh) {
2721 cpu_unregister_map_client_do(client);
2722 break;
2723 }
2724 }
2725 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002726}
2727
2728static void cpu_notify_map_clients(void)
2729{
Fam Zheng38e047b2015-03-16 17:03:35 +08002730 qemu_mutex_lock(&map_client_list_lock);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002731 cpu_notify_map_clients_locked();
Fam Zheng38e047b2015-03-16 17:03:35 +08002732 qemu_mutex_unlock(&map_client_list_lock);
aliguori6d16c2f2009-01-22 16:59:11 +00002733}
2734
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002735bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2736{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002737 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002738 hwaddr l, xlat;
2739
Paolo Bonzini41063e12015-03-18 14:21:43 +01002740 rcu_read_lock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002741 while (len > 0) {
2742 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002743 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2744 if (!memory_access_is_direct(mr, is_write)) {
2745 l = memory_access_size(mr, l, addr);
2746            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                /* drop the RCU read lock taken above before bailing out */
                rcu_read_unlock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002747                return false;
2748            }
2749 }
2750
2751 len -= l;
2752 addr += l;
2753 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002754 rcu_read_unlock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002755 return true;
2756}
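/*
 * Usage sketch (hypothetical caller): probe before committing to a
 * transfer so a device model can fail cleanly instead of performing a
 * partial write.  The check is advisory; the write itself still
 * returns a MemTxResult.
 *
 *     if (!address_space_access_valid(as, addr, len, true)) {
 *         ...raise an error in the device...
 *     }
 *     address_space_write(as, addr, MEMTXATTRS_UNSPECIFIED, buf, len);
 */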
2757
aliguori6d16c2f2009-01-22 16:59:11 +00002758/* Map a physical memory region into a host virtual address.
2759 * May map a subset of the requested range, given by and returned in *plen.
2760 * May return NULL if resources needed to perform the mapping are exhausted.
2761 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002762 * Use cpu_register_map_client() to know when retrying the map operation is
2763 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002764 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002765void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002766 hwaddr addr,
2767 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002768 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002769{
Avi Kivitya8170e52012-10-23 12:30:10 +02002770 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002771 hwaddr done = 0;
2772 hwaddr l, xlat, base;
2773 MemoryRegion *mr, *this_mr;
2774 ram_addr_t raddr;
aliguori6d16c2f2009-01-22 16:59:11 +00002775
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002776 if (len == 0) {
2777 return NULL;
2778 }
aliguori6d16c2f2009-01-22 16:59:11 +00002779
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002780 l = len;
Paolo Bonzini41063e12015-03-18 14:21:43 +01002781 rcu_read_lock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002782 mr = address_space_translate(as, addr, &xlat, &l, is_write);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002783
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002784 if (!memory_access_is_direct(mr, is_write)) {
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002785 if (atomic_xchg(&bounce.in_use, true)) {
Paolo Bonzini41063e12015-03-18 14:21:43 +01002786 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002787 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002788 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002789 /* Avoid unbounded allocations */
2790 l = MIN(l, TARGET_PAGE_SIZE);
2791 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002792 bounce.addr = addr;
2793 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002794
2795 memory_region_ref(mr);
2796 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002797 if (!is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002798 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2799 bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002800 }
aliguori6d16c2f2009-01-22 16:59:11 +00002801
Paolo Bonzini41063e12015-03-18 14:21:43 +01002802 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002803 *plen = l;
2804 return bounce.buffer;
2805 }
2806
2807 base = xlat;
2808 raddr = memory_region_get_ram_addr(mr);
2809
2810 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00002811 len -= l;
2812 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002813 done += l;
2814 if (len == 0) {
2815 break;
2816 }
2817
2818 l = len;
2819 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2820 if (this_mr != mr || xlat != base + done) {
2821 break;
2822 }
aliguori6d16c2f2009-01-22 16:59:11 +00002823 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002824
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002825 memory_region_ref(mr);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002826 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002827 *plen = done;
2828 return qemu_ram_ptr_length(raddr + base, plen);
aliguori6d16c2f2009-01-22 16:59:11 +00002829}
2830
Avi Kivityac1970f2012-10-03 16:22:53 +02002831/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002832 * Will also mark the memory as dirty if is_write == 1. access_len gives
2833 * the amount of memory that was actually read or written by the caller.
2834 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002835void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2836 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002837{
2838 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002839 MemoryRegion *mr;
2840 ram_addr_t addr1;
2841
2842 mr = qemu_ram_addr_from_host(buffer, &addr1);
2843 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00002844 if (is_write) {
Paolo Bonzini845b6212015-03-23 11:45:53 +01002845 invalidate_and_set_dirty(mr, addr1, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002846 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002847 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002848 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002849 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002850 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00002851 return;
2852 }
2853 if (is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002854 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
2855 bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002856 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002857 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002858 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002859 memory_region_unref(bounce.mr);
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002860 atomic_mb_set(&bounce.in_use, false);
aliguoriba223c22009-01-22 16:59:16 +00002861 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002862}
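/*
 * Typical map/unmap pairing, as a sketch; retrying when *plen comes
 * back shortened is the part callers most often get wrong:
 *
 *     hwaddr len = size, l = size;
 *     while (len > 0) {
 *         void *p = address_space_map(as, addr, &l, true);
 *         if (!p) {
 *             break;    ...or register a map client and retry...
 *         }
 *         ...fill the l bytes at p...
 *         address_space_unmap(as, p, l, true, l);
 *         addr += l;
 *         len -= l;
 *         l = len;
 *     }
 */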
bellardd0ecd2a2006-04-23 17:14:48 +00002863
Avi Kivitya8170e52012-10-23 12:30:10 +02002864void *cpu_physical_memory_map(hwaddr addr,
2865 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002866 int is_write)
2867{
2868 return address_space_map(&address_space_memory, addr, plen, is_write);
2869}
2870
Avi Kivitya8170e52012-10-23 12:30:10 +02002871void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2872 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002873{
2874 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2875}
2876
bellard8df1cd02005-01-28 22:37:22 +00002877/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01002878static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
2879 MemTxAttrs attrs,
2880 MemTxResult *result,
2881 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002882{
bellard8df1cd02005-01-28 22:37:22 +00002883 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002884 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002885 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002886 hwaddr l = 4;
2887 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01002888 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02002889 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00002890
Paolo Bonzini41063e12015-03-18 14:21:43 +01002891 rcu_read_lock();
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002892 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002893 if (l < 4 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02002894 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02002895
bellard8df1cd02005-01-28 22:37:22 +00002896 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01002897 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002898#if defined(TARGET_WORDS_BIGENDIAN)
2899 if (endian == DEVICE_LITTLE_ENDIAN) {
2900 val = bswap32(val);
2901 }
2902#else
2903 if (endian == DEVICE_BIG_ENDIAN) {
2904 val = bswap32(val);
2905 }
2906#endif
bellard8df1cd02005-01-28 22:37:22 +00002907 } else {
2908 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002909 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002910 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002911 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002912 switch (endian) {
2913 case DEVICE_LITTLE_ENDIAN:
2914 val = ldl_le_p(ptr);
2915 break;
2916 case DEVICE_BIG_ENDIAN:
2917 val = ldl_be_p(ptr);
2918 break;
2919 default:
2920 val = ldl_p(ptr);
2921 break;
2922 }
Peter Maydell50013112015-04-26 16:49:24 +01002923 r = MEMTX_OK;
2924 }
2925 if (result) {
2926 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00002927 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002928 if (release_lock) {
2929 qemu_mutex_unlock_iothread();
2930 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002931 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00002932 return val;
2933}
2934
Peter Maydell50013112015-04-26 16:49:24 +01002935uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
2936 MemTxAttrs attrs, MemTxResult *result)
2937{
2938 return address_space_ldl_internal(as, addr, attrs, result,
2939 DEVICE_NATIVE_ENDIAN);
2940}
2941
2942uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
2943 MemTxAttrs attrs, MemTxResult *result)
2944{
2945 return address_space_ldl_internal(as, addr, attrs, result,
2946 DEVICE_LITTLE_ENDIAN);
2947}
2948
2949uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
2950 MemTxAttrs attrs, MemTxResult *result)
2951{
2952 return address_space_ldl_internal(as, addr, attrs, result,
2953 DEVICE_BIG_ENDIAN);
2954}
2955
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002956uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002957{
Peter Maydell50013112015-04-26 16:49:24 +01002958 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002959}
2960
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002961uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002962{
Peter Maydell50013112015-04-26 16:49:24 +01002963 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002964}
2965
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002966uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002967{
Peter Maydell50013112015-04-26 16:49:24 +01002968 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002969}
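/*
 * Example with invented contents: if guest memory at 0x1000 holds the
 * bytes 78 56 34 12, then on any host
 *
 *     ldl_le_phys(as, 0x1000) == 0x12345678
 *     ldl_be_phys(as, 0x1000) == 0x78563412
 *
 * and ldl_phys() follows the target's native order; the _internal
 * helper above byte-swaps MMIO results to match.
 */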
2970
bellard84b7b8e2005-11-28 21:19:04 +00002971/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01002972static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
2973 MemTxAttrs attrs,
2974 MemTxResult *result,
2975 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00002976{
bellard84b7b8e2005-11-28 21:19:04 +00002977 uint8_t *ptr;
2978 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002979 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002980 hwaddr l = 8;
2981 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01002982 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02002983 bool release_lock = false;
bellard84b7b8e2005-11-28 21:19:04 +00002984
Paolo Bonzini41063e12015-03-18 14:21:43 +01002985 rcu_read_lock();
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002986 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002987 false);
2988 if (l < 8 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02002989 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02002990
bellard84b7b8e2005-11-28 21:19:04 +00002991 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01002992 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
Paolo Bonzini968a5622013-05-24 17:58:37 +02002993#if defined(TARGET_WORDS_BIGENDIAN)
2994 if (endian == DEVICE_LITTLE_ENDIAN) {
2995 val = bswap64(val);
2996 }
2997#else
2998 if (endian == DEVICE_BIG_ENDIAN) {
2999 val = bswap64(val);
3000 }
3001#endif
bellard84b7b8e2005-11-28 21:19:04 +00003002 } else {
3003 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003004 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003005 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003006 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003007 switch (endian) {
3008 case DEVICE_LITTLE_ENDIAN:
3009 val = ldq_le_p(ptr);
3010 break;
3011 case DEVICE_BIG_ENDIAN:
3012 val = ldq_be_p(ptr);
3013 break;
3014 default:
3015 val = ldq_p(ptr);
3016 break;
3017 }
Peter Maydell50013112015-04-26 16:49:24 +01003018 r = MEMTX_OK;
3019 }
3020 if (result) {
3021 *result = r;
bellard84b7b8e2005-11-28 21:19:04 +00003022 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003023 if (release_lock) {
3024 qemu_mutex_unlock_iothread();
3025 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003026 rcu_read_unlock();
bellard84b7b8e2005-11-28 21:19:04 +00003027 return val;
3028}
3029
Peter Maydell50013112015-04-26 16:49:24 +01003030uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
3031 MemTxAttrs attrs, MemTxResult *result)
3032{
3033 return address_space_ldq_internal(as, addr, attrs, result,
3034 DEVICE_NATIVE_ENDIAN);
3035}
3036
3037uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
3038 MemTxAttrs attrs, MemTxResult *result)
3039{
3040 return address_space_ldq_internal(as, addr, attrs, result,
3041 DEVICE_LITTLE_ENDIAN);
3042}
3043
3044uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
3045 MemTxAttrs attrs, MemTxResult *result)
3046{
3047 return address_space_ldq_internal(as, addr, attrs, result,
3048 DEVICE_BIG_ENDIAN);
3049}
3050
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003051uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003052{
Peter Maydell50013112015-04-26 16:49:24 +01003053 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003054}
3055
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003056uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003057{
Peter Maydell50013112015-04-26 16:49:24 +01003058 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003059}
3060
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003061uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003062{
Peter Maydell50013112015-04-26 16:49:24 +01003063 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003064}
3065
bellardaab33092005-10-30 20:48:42 +00003066/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003067uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
3068 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003069{
3070 uint8_t val;
Peter Maydell50013112015-04-26 16:49:24 +01003071 MemTxResult r;
3072
3073 r = address_space_rw(as, addr, attrs, &val, 1, 0);
3074 if (result) {
3075 *result = r;
3076 }
bellardaab33092005-10-30 20:48:42 +00003077 return val;
3078}
3079
Peter Maydell50013112015-04-26 16:49:24 +01003080uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
3081{
3082 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3083}
3084
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003085/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003086static inline uint32_t address_space_lduw_internal(AddressSpace *as,
3087 hwaddr addr,
3088 MemTxAttrs attrs,
3089 MemTxResult *result,
3090 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003091{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003092 uint8_t *ptr;
3093 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003094 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003095 hwaddr l = 2;
3096 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003097 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003098 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003099
Paolo Bonzini41063e12015-03-18 14:21:43 +01003100 rcu_read_lock();
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003101 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003102 false);
3103 if (l < 2 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003104 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003105
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003106 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003107 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003108#if defined(TARGET_WORDS_BIGENDIAN)
3109 if (endian == DEVICE_LITTLE_ENDIAN) {
3110 val = bswap16(val);
3111 }
3112#else
3113 if (endian == DEVICE_BIG_ENDIAN) {
3114 val = bswap16(val);
3115 }
3116#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003117 } else {
3118 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003119 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003120 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003121 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003122 switch (endian) {
3123 case DEVICE_LITTLE_ENDIAN:
3124 val = lduw_le_p(ptr);
3125 break;
3126 case DEVICE_BIG_ENDIAN:
3127 val = lduw_be_p(ptr);
3128 break;
3129 default:
3130 val = lduw_p(ptr);
3131 break;
3132 }
Peter Maydell50013112015-04-26 16:49:24 +01003133 r = MEMTX_OK;
3134 }
3135 if (result) {
3136 *result = r;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003137 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003138 if (release_lock) {
3139 qemu_mutex_unlock_iothread();
3140 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003141 rcu_read_unlock();
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003142 return val;
bellardaab33092005-10-30 20:48:42 +00003143}
3144
Peter Maydell50013112015-04-26 16:49:24 +01003145uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
3146 MemTxAttrs attrs, MemTxResult *result)
3147{
3148 return address_space_lduw_internal(as, addr, attrs, result,
3149 DEVICE_NATIVE_ENDIAN);
3150}
3151
3152uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
3153 MemTxAttrs attrs, MemTxResult *result)
3154{
3155 return address_space_lduw_internal(as, addr, attrs, result,
3156 DEVICE_LITTLE_ENDIAN);
3157}
3158
3159uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
3160 MemTxAttrs attrs, MemTxResult *result)
3161{
3162 return address_space_lduw_internal(as, addr, attrs, result,
3163 DEVICE_BIG_ENDIAN);
3164}
3165
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003166uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003167{
Peter Maydell50013112015-04-26 16:49:24 +01003168 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003169}
3170
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003171uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003172{
Peter Maydell50013112015-04-26 16:49:24 +01003173 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003174}
3175
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003176uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003177{
Peter Maydell50013112015-04-26 16:49:24 +01003178 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003179}
3180
bellard8df1cd02005-01-28 22:37:22 +00003181/* warning: addr must be aligned. The ram page is not marked as dirty
3182 and the code inside is not invalidated. It is useful if the dirty
3183 bits are used to track modified PTEs */
Peter Maydell50013112015-04-26 16:49:24 +01003184void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3185 MemTxAttrs attrs, MemTxResult *result)
bellard8df1cd02005-01-28 22:37:22 +00003186{
bellard8df1cd02005-01-28 22:37:22 +00003187 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003188 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003189 hwaddr l = 4;
3190 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003191 MemTxResult r;
Paolo Bonzini845b6212015-03-23 11:45:53 +01003192 uint8_t dirty_log_mask;
Jan Kiszka4840f102015-06-18 18:47:22 +02003193 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003194
Paolo Bonzini41063e12015-03-18 14:21:43 +01003195 rcu_read_lock();
Edgar E. Iglesias2198a122013-11-28 10:13:41 +01003196 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003197 true);
3198 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003199 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003200
Peter Maydell50013112015-04-26 16:49:24 +01003201 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003202 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003203 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00003204 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00003205 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003206
Paolo Bonzini845b6212015-03-23 11:45:53 +01003207 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3208 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
Paolo Bonzini58d27072015-03-23 11:56:01 +01003209 cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
Peter Maydell50013112015-04-26 16:49:24 +01003210 r = MEMTX_OK;
3211 }
3212 if (result) {
3213 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003214 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003215 if (release_lock) {
3216 qemu_mutex_unlock_iothread();
3217 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003218 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003219}
3220
Peter Maydell50013112015-04-26 16:49:24 +01003221void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3222{
3223 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3224}
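/*
 * Illustrative use, following the warning above
 * address_space_stl_notdirty(): target MMU code that sets
 * accessed/dirty bits in a guest page table entry in place, where
 * invalidating translations for the page would be wasted work (the
 * PG_ACCESSED_MASK name is borrowed from the x86 target as an
 * example):
 *
 *     pte |= PG_ACCESSED_MASK;
 *     stl_phys_notdirty(cpu->as, pte_addr, pte);
 */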
3225
bellard8df1cd02005-01-28 22:37:22 +00003226/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003227static inline void address_space_stl_internal(AddressSpace *as,
3228 hwaddr addr, uint32_t val,
3229 MemTxAttrs attrs,
3230 MemTxResult *result,
3231 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003232{
bellard8df1cd02005-01-28 22:37:22 +00003233 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003234 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003235 hwaddr l = 4;
3236 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003237 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003238 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003239
Paolo Bonzini41063e12015-03-18 14:21:43 +01003240 rcu_read_lock();
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003241 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003242 true);
3243 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003244 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003245
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003246#if defined(TARGET_WORDS_BIGENDIAN)
3247 if (endian == DEVICE_LITTLE_ENDIAN) {
3248 val = bswap32(val);
3249 }
3250#else
3251 if (endian == DEVICE_BIG_ENDIAN) {
3252 val = bswap32(val);
3253 }
3254#endif
Peter Maydell50013112015-04-26 16:49:24 +01003255 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003256 } else {
bellard8df1cd02005-01-28 22:37:22 +00003257 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003258 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00003259 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003260 switch (endian) {
3261 case DEVICE_LITTLE_ENDIAN:
3262 stl_le_p(ptr, val);
3263 break;
3264 case DEVICE_BIG_ENDIAN:
3265 stl_be_p(ptr, val);
3266 break;
3267 default:
3268 stl_p(ptr, val);
3269 break;
3270 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003271 invalidate_and_set_dirty(mr, addr1, 4);
Peter Maydell50013112015-04-26 16:49:24 +01003272 r = MEMTX_OK;
bellard8df1cd02005-01-28 22:37:22 +00003273 }
Peter Maydell50013112015-04-26 16:49:24 +01003274 if (result) {
3275 *result = r;
3276 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003277 if (release_lock) {
3278 qemu_mutex_unlock_iothread();
3279 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003280 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003281}
3282
3283void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3284 MemTxAttrs attrs, MemTxResult *result)
3285{
3286 address_space_stl_internal(as, addr, val, attrs, result,
3287 DEVICE_NATIVE_ENDIAN);
3288}
3289
3290void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3291 MemTxAttrs attrs, MemTxResult *result)
3292{
3293 address_space_stl_internal(as, addr, val, attrs, result,
3294 DEVICE_LITTLE_ENDIAN);
3295}
3296
3297void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3298 MemTxAttrs attrs, MemTxResult *result)
3299{
3300 address_space_stl_internal(as, addr, val, attrs, result,
3301 DEVICE_BIG_ENDIAN);
bellard8df1cd02005-01-28 22:37:22 +00003302}
3303
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003304void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003305{
Peter Maydell50013112015-04-26 16:49:24 +01003306 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003307}
3308
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003309void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003310{
Peter Maydell50013112015-04-26 16:49:24 +01003311 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003312}
3313
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003314void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003315{
Peter Maydell50013112015-04-26 16:49:24 +01003316 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003317}
3318
bellardaab33092005-10-30 20:48:42 +00003319/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003320void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3321 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003322{
3323 uint8_t v = val;
Peter Maydell50013112015-04-26 16:49:24 +01003324 MemTxResult r;
3325
3326 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3327 if (result) {
3328 *result = r;
3329 }
3330}
3331
3332void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3333{
3334 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003335}
3336
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003337/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003338static inline void address_space_stw_internal(AddressSpace *as,
3339 hwaddr addr, uint32_t val,
3340 MemTxAttrs attrs,
3341 MemTxResult *result,
3342 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003343{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003344 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003345 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003346 hwaddr l = 2;
3347 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003348 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003349 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003350
Paolo Bonzini41063e12015-03-18 14:21:43 +01003351 rcu_read_lock();
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003352 mr = address_space_translate(as, addr, &addr1, &l, true);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003353 if (l < 2 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003354 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003355
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003356#if defined(TARGET_WORDS_BIGENDIAN)
3357 if (endian == DEVICE_LITTLE_ENDIAN) {
3358 val = bswap16(val);
3359 }
3360#else
3361 if (endian == DEVICE_BIG_ENDIAN) {
3362 val = bswap16(val);
3363 }
3364#endif
Peter Maydell50013112015-04-26 16:49:24 +01003365 r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003366 } else {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003367 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003368 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003369 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003370 switch (endian) {
3371 case DEVICE_LITTLE_ENDIAN:
3372 stw_le_p(ptr, val);
3373 break;
3374 case DEVICE_BIG_ENDIAN:
3375 stw_be_p(ptr, val);
3376 break;
3377 default:
3378 stw_p(ptr, val);
3379 break;
3380 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003381 invalidate_and_set_dirty(mr, addr1, 2);
Peter Maydell50013112015-04-26 16:49:24 +01003382 r = MEMTX_OK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003383 }
Peter Maydell50013112015-04-26 16:49:24 +01003384 if (result) {
3385 *result = r;
3386 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003387 if (release_lock) {
3388 qemu_mutex_unlock_iothread();
3389 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003390 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003391}
3392
3393void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
3394 MemTxAttrs attrs, MemTxResult *result)
3395{
3396 address_space_stw_internal(as, addr, val, attrs, result,
3397 DEVICE_NATIVE_ENDIAN);
3398}
3399
3400void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
3401 MemTxAttrs attrs, MemTxResult *result)
3402{
3403 address_space_stw_internal(as, addr, val, attrs, result,
3404 DEVICE_LITTLE_ENDIAN);
3405}
3406
3407void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
3408 MemTxAttrs attrs, MemTxResult *result)
3409{
3410 address_space_stw_internal(as, addr, val, attrs, result,
3411 DEVICE_BIG_ENDIAN);
bellardaab33092005-10-30 20:48:42 +00003412}
3413
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003414void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003415{
Peter Maydell50013112015-04-26 16:49:24 +01003416 address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003417}
3418
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003419void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003420{
Peter Maydell50013112015-04-26 16:49:24 +01003421 address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003422}
3423
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003424void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003425{
Peter Maydell50013112015-04-26 16:49:24 +01003426 address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003427}
3428
bellardaab33092005-10-30 20:48:42 +00003429/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003430void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
3431 MemTxAttrs attrs, MemTxResult *result)
3432{
3433 MemTxResult r;
3434 val = tswap64(val);
3435 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3436 if (result) {
3437 *result = r;
3438 }
3439}
3440
3441void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
3442 MemTxAttrs attrs, MemTxResult *result)
3443{
3444 MemTxResult r;
3445 val = cpu_to_le64(val);
3446 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3447 if (result) {
3448 *result = r;
3449 }
3450}

3451void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
3452 MemTxAttrs attrs, MemTxResult *result)
3453{
3454 MemTxResult r;
3455 val = cpu_to_be64(val);
3456 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3457 if (result) {
3458 *result = r;
3459 }
3460}
3461
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003462void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00003463{
Peter Maydell50013112015-04-26 16:49:24 +01003464 address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003465}
3466
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003467void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003468{
Peter Maydell50013112015-04-26 16:49:24 +01003469 address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003470}
3471
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003472void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003473{
Peter Maydell50013112015-04-26 16:49:24 +01003474 address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003475}
3476
aliguori5e2972f2009-03-28 17:51:36 +00003477/* virtual memory access for debug (includes writing to ROM) */
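/*
 * Illustrative call (not part of the original file), assuming a valid
 * CPUState such as first_cpu and a hypothetical guest address 0x1000:
 *
 *     uint8_t buf[4];
 *     if (cpu_memory_rw_debug(first_cpu, 0x1000, buf, sizeof(buf), 0) < 0) {
 *         // no physical page was mapped at that address
 *     }
 */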
Andreas Färberf17ec442013-06-29 19:40:58 +02003478int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00003479 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003480{
3481 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02003482 hwaddr phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00003483 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00003484
3485 while (len > 0) {
3486 page = addr & TARGET_PAGE_MASK;
Andreas Färberf17ec442013-06-29 19:40:58 +02003487 phys_addr = cpu_get_phys_page_debug(cpu, page);
bellard13eb76e2004-01-24 15:23:36 +00003488 /* if no physical page mapped, return an error */
3489 if (phys_addr == -1) {
3490 return -1;
 }
3491 l = (page + TARGET_PAGE_SIZE) - addr;
3492 if (l > len) {
3493 l = len;
 }
aliguori5e2972f2009-03-28 17:51:36 +00003494 phys_addr += (addr & ~TARGET_PAGE_MASK);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003495 if (is_write) {
3496 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
3497 } else {
Peter Maydell5c9eb022015-04-26 16:49:24 +01003498 address_space_rw(cpu->as, phys_addr, MEMTXATTRS_UNSPECIFIED,
3499 buf, l, 0);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003500 }
bellard13eb76e2004-01-24 15:23:36 +00003501 len -= l;
3502 buf += l;
3503 addr += l;
3504 }
3505 return 0;
3506}
Paul Brooka68fe892010-03-01 00:08:59 +00003507#endif
bellard13eb76e2004-01-24 15:23:36 +00003508
Blue Swirl8e4a4242013-01-06 18:30:17 +00003509/*
3510 * A helper function for the _utterly broken_ virtio device model to find out if
3511 * it's running on a big endian machine. Don't do this at home kids!
3512 */
Greg Kurz98ed8ec2014-06-24 19:26:29 +02003513bool target_words_bigendian(void);
3514bool target_words_bigendian(void)
Blue Swirl8e4a4242013-01-06 18:30:17 +00003515{
3516#if defined(TARGET_WORDS_BIGENDIAN)
3517 return true;
3518#else
3519 return false;
3520#endif
3521}
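
/*
 * Illustrative caller (not part of the original file): device code can
 * pick the guest-visible byte order with this helper, e.g.:
 *
 *     uint16_t wire = target_words_bigendian() ? cpu_to_be16(x)
 *                                              : cpu_to_le16(x);
 *
 * where 'x' is a hypothetical host-endian value and cpu_to_be16/
 * cpu_to_le16 come from qemu/bswap.h.
 */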
3522
Wen Congyang76f35532012-05-07 12:04:18 +08003523#ifndef CONFIG_USER_ONLY
Avi Kivitya8170e52012-10-23 12:30:10 +02003524bool cpu_physical_memory_is_io(hwaddr phys_addr)
Wen Congyang76f35532012-05-07 12:04:18 +08003525{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003526 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003527 hwaddr l = 1;
Paolo Bonzini41063e12015-03-18 14:21:43 +01003528 bool res;
Wen Congyang76f35532012-05-07 12:04:18 +08003529
Paolo Bonzini41063e12015-03-18 14:21:43 +01003530 rcu_read_lock();
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003531 mr = address_space_translate(&address_space_memory,
3532 phys_addr, &phys_addr, &l, false);
Wen Congyang76f35532012-05-07 12:04:18 +08003533
Paolo Bonzini41063e12015-03-18 14:21:43 +01003534 res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
3535 rcu_read_unlock();
3536 return res;
Wen Congyang76f35532012-05-07 12:04:18 +08003537}
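
/*
 * Usage sketch (illustrative, not part of the original file): dump or
 * migration code can use this predicate to avoid touching MMIO, e.g.:
 *
 *     if (cpu_physical_memory_is_io(paddr)) {
 *         // skip: 'paddr' does not point at guest RAM or ROM
 *     }
 *
 * 'paddr' is a hypothetical physical address.
 */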
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003538
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003539int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003540{
3541 RAMBlock *block;
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003542 int ret = 0;
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003543
Mike Day0dc3f442013-09-05 14:41:35 -04003544 rcu_read_lock();
3545 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003546 ret = func(block->idstr, block->host, block->offset,
3547 block->used_length, opaque);
3548 if (ret) {
3549 break;
3550 }
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003551 }
Mike Day0dc3f442013-09-05 14:41:35 -04003552 rcu_read_unlock();
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003553 return ret;
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003554}
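
/*
 * Illustrative callback (not part of the original file) matching the
 * RAMBlockIterFunc signature used above; a non-zero return value stops
 * the iteration early:
 *
 *     static int print_block(const char *idstr, void *host_addr,
 *                            ram_addr_t offset, ram_addr_t length,
 *                            void *opaque)
 *     {
 *         printf("%s: host %p offset " RAM_ADDR_FMT " length " RAM_ADDR_FMT
 *                "\n", idstr, host_addr, offset, length);
 *         return 0;
 *     }
 *
 *     qemu_ram_foreach_block(print_block, NULL);
 */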
Peter Maydellec3f8c92013-06-27 20:53:38 +01003555#endif