/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#ifndef _WIN32
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "hw/qdev.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "exec/log.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits to skip to the next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables. */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
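
/* A worked example of the arithmetic above: on a target with 4 KiB pages
 * (TARGET_PAGE_BITS == 12), the page-number part of an address is
 * 64 - 12 = 52 bits wide and each level resolves P_L2_BITS == 9 of them,
 * so P_L2_LEVELS == ((64 - 12 - 1) / 9) + 1 == 6 levels, covering
 * 6 * 9 = 54 >= 52 bits.
 */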

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3
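
/* These are reserved slots in every PhysPageMap's sections[] array; in
 * particular slot 0 is the unassigned section, which is why a freshly
 * allocated leaf entry points at PHYS_SECTION_UNASSIGNED by default.
 */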

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

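/* Map [*index, *index + *nb) pages of the radix tree rooted at *lp to the
 * section number "leaf".  At each level, a range that is aligned to and at
 * least as large as this level's step is stored as a single entry; anything
 * smaller recurses one level down.
 */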
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non-leaf page entry. Simply detect that the entry has a single
 * child, and update our entry so we can skip it and go directly to the
 * destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return section->size.hi ||
           range_covers_byte(section->offset_within_address_space,
                             section->size.lo, addr);
}

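/* Walk the radix tree for addr.  lp.skip says how many levels the current
 * entry lets us jump over at once; hitting PHYS_MAP_NODE_NIL on the way
 * means the page was never mapped and resolves to the unassigned section.
 */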
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

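/* The most recently used section is cached in d->mru_section, so repeated
 * lookups that land in the same section skip the radix-tree walk entirely.
 * The cache is read and written with atomic accesses since readers run
 * inside RCU critical sections.
 */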
/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = atomic_read(&d->mru_section);
    subpage_t *subpage;
    bool update;

    if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
        section_covers_addr(section, addr)) {
        update = false;
    } else {
        section = phys_page_find(d->phys_map, addr, d->map.nodes,
                                 d->map.sections);
        update = true;
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    if (update) {
        atomic_set(&d->mru_section, section);
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

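/* Translate addr through any IOMMUs on the path: each iteration maps the
 * address into the IOMMU's target address space and clamps *plen to the
 * covering IOTLB entry, until a terminal (non-IOMMU) memory region is
 * reached or the IOMMU denies the access.
 */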
/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
#endif

#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

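/* cpu_index values are handed out from this bitmap so that, presumably for
 * CPU hot-unplug, an index freed by cpu_exec_exit() can be reused; the
 * user-mode variant below simply counts the CPUs already on the list.
 */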
static int cpu_get_free_index(Error **errp)
{
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
                   MAX_CPUMASK_BITS);
        return -1;
    }

    bitmap_set(cpu_index_map, cpu, 1);
    return cpu;
}

void cpu_exec_exit(CPUState *cpu)
{
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
        return;
    }

    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
    cpu->cpu_index = -1;
}
#else

static int cpu_get_free_index(Error **errp)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

void cpu_exec_exit(CPUState *cpu)
{
}
#endif

void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int cpu_index;
    Error *local_err = NULL;

    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();

    /* This is a softmmu CPU object, so create a property for it
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
                             (Object **)&cpu->memory,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));
#endif

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    MemTxAttrs attrs;
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
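
/* For example, a watchpoint covering [0x1000, 0x1003] and an access covering
 * [0x1002, 0x1005] overlap: addr (0x1002) <= wpend (0x1003) and wp->vaddr
 * (0x1000) <= addrend (0x1005), so the check above returns true.
 */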

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *     xxx removed from list
     *                  rcu_read_lock()
     *                  read mru_block
     *                                mru_block = NULL;
     *                                call_rcu(reclaim_ramblock, xxx);
     *                  rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}

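/* The dirty bitmap of each client is sharded into blocks of
 * DIRTY_MEMORY_BLOCK_SIZE bits.  The loop below visits every shard that
 * overlaps [page, end), clearing at most the remainder of one shard per
 * iteration.
 */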
/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    bool dirty = false;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
                                              offset, num);
        page += num;
    }

    rcu_read_unlock();

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}

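/* The returned iotlb value encodes two cases: for RAM, the page-aligned
 * ram_addr of the page ORed with a small PHYS_SECTION_* index (NOTDIRTY or
 * ROM) in the low bits; for MMIO, the section's index in the dispatch map
 * plus the offset within the page.
 */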
/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}

static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

Avi Kivityac1970f2012-10-03 16:22:53 +02001181static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001182{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001183 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini00752702013-05-29 12:13:54 +02001184 AddressSpaceDispatch *d = as->next_dispatch;
Paolo Bonzini99b9cc02013-05-27 13:18:01 +02001185 MemoryRegionSection now = *section, remain = *section;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001186 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001187
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001188 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1189 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1190 - now.offset_within_address_space;
1191
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001192 now.size = int128_min(int128_make64(left), now.size);
Avi Kivityac1970f2012-10-03 16:22:53 +02001193 register_subpage(d, &now);
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001194 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001195 now.size = int128_zero();
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001196 }
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001197 while (int128_ne(remain.size, now.size)) {
1198 remain.size = int128_sub(remain.size, now.size);
1199 remain.offset_within_address_space += int128_get64(now.size);
1200 remain.offset_within_region += int128_get64(now.size);
Tyler Hall69b67642012-07-25 18:45:04 -04001201 now = remain;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001202 if (int128_lt(remain.size, page_size)) {
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001203 register_subpage(d, &now);
Hu Tao88266242013-08-29 18:21:16 +08001204 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001205 now.size = page_size;
Avi Kivityac1970f2012-10-03 16:22:53 +02001206 register_subpage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001207 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001208 now.size = int128_and(now.size, int128_neg(page_size));
Avi Kivityac1970f2012-10-03 16:22:53 +02001209 register_multipage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001210 }
Avi Kivity0f0cb162012-02-13 17:14:32 +02001211 }
1212}
1213
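/*
 * Worked example for mem_add() above (hypothetical addresses, 4 KiB
 * target pages): a section spanning [0x1800, 0x4400) is registered in
 * three pieces:
 *
 *   [0x1800, 0x2000)  register_subpage()   - unaligned head
 *   [0x2000, 0x4000)  register_multipage() - whole pages
 *   [0x4000, 0x4400)  register_subpage()   - unaligned tail
 */
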
Sheng Yang62a27442010-01-26 19:21:16 +08001214void qemu_flush_coalesced_mmio_buffer(void)
1215{
1216 if (kvm_enabled())
1217 kvm_flush_coalesced_mmio_buffer();
1218}
1219
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001220void qemu_mutex_lock_ramlist(void)
1221{
1222 qemu_mutex_lock(&ram_list.mutex);
1223}
1224
1225void qemu_mutex_unlock_ramlist(void)
1226{
1227 qemu_mutex_unlock(&ram_list.mutex);
1228}
1229
Markus Armbrustere1e84ba2013-07-31 15:11:10 +02001230#ifdef __linux__
Marcelo Tosattic9027602010-03-01 20:25:08 -03001231
1232#include <sys/vfs.h>
1233
1234#define HUGETLBFS_MAGIC 0x958458f6
1235
Hu Taofc7a5802014-09-09 13:28:01 +08001236static long gethugepagesize(const char *path, Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001237{
1238 struct statfs fs;
1239 int ret;
1240
1241 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001242 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001243 } while (ret != 0 && errno == EINTR);
1244
1245 if (ret != 0) {
Hu Taofc7a5802014-09-09 13:28:01 +08001246 error_setg_errno(errp, errno, "failed to get page size of file %s",
1247 path);
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001248 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001249 }
1250
Marcelo Tosattic9027602010-03-01 20:25:08 -03001251 return fs.f_bsize;
1252}
1253
Alex Williamson04b16652010-07-02 11:13:17 -06001254static void *file_ram_alloc(RAMBlock *block,
1255 ram_addr_t memory,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001256 const char *path,
1257 Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001258{
Pavel Fedin8d31d6b2015-10-28 12:54:07 +03001259 struct stat st;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001260 char *filename;
Peter Feiner8ca761f2013-03-04 13:54:25 -05001261 char *sanitized_name;
1262 char *c;
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +03001263 void *area;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001264 int fd;
Hu Tao557529d2014-09-09 13:28:00 +08001265 uint64_t hpagesize;
Hu Taofc7a5802014-09-09 13:28:01 +08001266 Error *local_err = NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001267
Hu Taofc7a5802014-09-09 13:28:01 +08001268 hpagesize = gethugepagesize(path, &local_err);
1269 if (local_err) {
1270 error_propagate(errp, local_err);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001271 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001272 }
Igor Mammedova2b257d2014-10-31 16:38:37 +00001273 block->mr->align = hpagesize;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001274
1275 if (memory < hpagesize) {
Hu Tao557529d2014-09-09 13:28:00 +08001276 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1277 "or larger than huge page size 0x%" PRIx64,
1278 memory, hpagesize);
1279 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001280 }
1281
1282 if (kvm_enabled() && !kvm_has_sync_mmu()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001283 error_setg(errp,
1284 "host lacks kvm mmu notifiers, -mem-path unsupported");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001285 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001286 }
1287
Pavel Fedin8d31d6b2015-10-28 12:54:07 +03001288 if (!stat(path, &st) && S_ISDIR(st.st_mode)) {
1289 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1290 sanitized_name = g_strdup(memory_region_name(block->mr));
1291 for (c = sanitized_name; *c != '\0'; c++) {
1292 if (*c == '/') {
1293 *c = '_';
1294 }
1295 }
1296
1297 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1298 sanitized_name);
1299 g_free(sanitized_name);
1300
1301 fd = mkstemp(filename);
1302 if (fd >= 0) {
1303 unlink(filename);
1304 }
1305 g_free(filename);
1306 } else {
1307 fd = open(path, O_RDWR | O_CREAT, 0644);
Peter Feiner8ca761f2013-03-04 13:54:25 -05001308 }
1309
Marcelo Tosattic9027602010-03-01 20:25:08 -03001310 if (fd < 0) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001311 error_setg_errno(errp, errno,
1312 "unable to create backing store for hugepages");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001313 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001314 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001315
Chen Hanxiao9284f312015-07-24 11:12:03 +08001316 memory = ROUND_UP(memory, hpagesize);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001317
1318 /*
1319 * ftruncate is not supported by hugetlbfs in older
1320 * hosts, so don't bother bailing out on errors.
1321 * If anything goes wrong with it under other filesystems,
1322 * mmap will fail.
1323 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001324 if (ftruncate(fd, memory)) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001325 perror("ftruncate");
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001326 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001327
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +03001328 area = qemu_ram_mmap(fd, memory, hpagesize, block->flags & RAM_SHARED);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001329 if (area == MAP_FAILED) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001330 error_setg_errno(errp, errno,
1331 "unable to map backing store for hugepages");
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001332 close(fd);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001333 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001334 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001335
1336 if (mem_prealloc) {
Paolo Bonzini38183312014-05-14 17:43:21 +08001337 os_mem_prealloc(fd, area, memory);
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001338 }
1339
Alex Williamson04b16652010-07-02 11:13:17 -06001340 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001341 return area;
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001342
1343error:
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001344 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001345}
1346#endif
1347
Mike Day0dc3f442013-09-05 14:41:35 -04001348/* Called with the ramlist lock held. */
Alex Williamsond17b5282010-06-25 11:08:38 -06001349static ram_addr_t find_ram_offset(ram_addr_t size)
1350{
Alex Williamson04b16652010-07-02 11:13:17 -06001351 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001352 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001353
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001354 assert(size != 0); /* it would hand out the same offset multiple times */
1355
Mike Day0dc3f442013-09-05 14:41:35 -04001356 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
Alex Williamson04b16652010-07-02 11:13:17 -06001357 return 0;
Mike Day0d53d9f2015-01-21 13:45:24 +01001358 }
Alex Williamson04b16652010-07-02 11:13:17 -06001359
Mike Day0dc3f442013-09-05 14:41:35 -04001360 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001361 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001362
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001363 end = block->offset + block->max_length;
Alex Williamson04b16652010-07-02 11:13:17 -06001364
Mike Day0dc3f442013-09-05 14:41:35 -04001365 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001366 if (next_block->offset >= end) {
1367 next = MIN(next, next_block->offset);
1368 }
1369 }
1370 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001371 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001372 mingap = next - end;
1373 }
1374 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001375
1376 if (offset == RAM_ADDR_MAX) {
1377 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1378 (uint64_t)size);
1379 abort();
1380 }
1381
Alex Williamson04b16652010-07-02 11:13:17 -06001382 return offset;
1383}
1384
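/*
 * Example for find_ram_offset() (hypothetical layout): with existing
 * blocks at [0x0, 0x4000) and [0x8000, 0xc000), a request for 0x2000
 * bytes returns offset 0x4000. The gap [0x4000, 0x8000) fits the
 * request and is smaller than the unbounded gap after the last block;
 * picking the smallest fitting gap keeps the offset space compact.
 */
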
Juan Quintela652d7ec2012-07-20 10:37:54 +02001385ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001386{
Alex Williamsond17b5282010-06-25 11:08:38 -06001387 RAMBlock *block;
1388 ram_addr_t last = 0;
1389
Mike Day0dc3f442013-09-05 14:41:35 -04001390 rcu_read_lock();
1391 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001392 last = MAX(last, block->offset + block->max_length);
Mike Day0d53d9f2015-01-21 13:45:24 +01001393 }
Mike Day0dc3f442013-09-05 14:41:35 -04001394 rcu_read_unlock();
Alex Williamsond17b5282010-06-25 11:08:38 -06001395 return last;
1396}
1397
Jason Baronddb97f12012-08-02 15:44:16 -04001398static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1399{
1400 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001401
1402 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
Marcel Apfelbaum47c8ca52015-02-04 17:43:54 +02001403 if (!machine_dump_guest_core(current_machine)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001404 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1405 if (ret) {
1406 perror("qemu_madvise");
1407 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1408 "but dump_guest_core=off specified\n");
1409 }
1410 }
1411}
1412
Mike Day0dc3f442013-09-05 14:41:35 -04001413/* Called within an RCU critical section, or while the ramlist lock
1414 * is held.
1415 */
Hu Tao20cfe882014-04-02 15:13:26 +08001416static RAMBlock *find_ram_block(ram_addr_t addr)
Cam Macdonell84b89d72010-07-26 18:10:57 -06001417{
Hu Tao20cfe882014-04-02 15:13:26 +08001418 RAMBlock *block;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001419
Mike Day0dc3f442013-09-05 14:41:35 -04001420 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001421 if (block->offset == addr) {
Hu Tao20cfe882014-04-02 15:13:26 +08001422 return block;
Avi Kivityc5705a72011-12-20 15:59:12 +02001423 }
1424 }
Hu Tao20cfe882014-04-02 15:13:26 +08001425
1426 return NULL;
1427}
1428
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001429const char *qemu_ram_get_idstr(RAMBlock *rb)
1430{
1431 return rb->idstr;
1432}
1433
Mike Dayae3a7042013-09-05 14:41:35 -04001434/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001435void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1436{
Mike Dayae3a7042013-09-05 14:41:35 -04001437 RAMBlock *new_block, *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001438
Mike Day0dc3f442013-09-05 14:41:35 -04001439 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001440 new_block = find_ram_block(addr);
Avi Kivityc5705a72011-12-20 15:59:12 +02001441 assert(new_block);
1442 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001443
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001444 if (dev) {
1445 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001446 if (id) {
1447 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001448 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001449 }
1450 }
1451 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1452
Mike Day0dc3f442013-09-05 14:41:35 -04001453 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001454 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001455 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1456 new_block->idstr);
1457 abort();
1458 }
1459 }
Mike Day0dc3f442013-09-05 14:41:35 -04001460 rcu_read_unlock();
Avi Kivityc5705a72011-12-20 15:59:12 +02001461}
1462
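/*
 * Example for qemu_ram_set_idstr() (hypothetical names): for a device
 * whose qdev path is "0000:00:02.0" and the name "ivshmem.bar2", the
 * resulting idstr is "0000:00:02.0/ivshmem.bar2"; with no device it is
 * simply "ivshmem.bar2".
 */
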
Mike Dayae3a7042013-09-05 14:41:35 -04001463/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001464void qemu_ram_unset_idstr(ram_addr_t addr)
1465{
Mike Dayae3a7042013-09-05 14:41:35 -04001466 RAMBlock *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001467
Mike Dayae3a7042013-09-05 14:41:35 -04001468 /* FIXME: arch_init.c assumes that this is not called throughout
1469 * migration. Ignore the problem since hot-unplug during migration
1470 * does not work anyway.
1471 */
1472
Mike Day0dc3f442013-09-05 14:41:35 -04001473 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001474 block = find_ram_block(addr);
Hu Tao20cfe882014-04-02 15:13:26 +08001475 if (block) {
1476 memset(block->idstr, 0, sizeof(block->idstr));
1477 }
Mike Day0dc3f442013-09-05 14:41:35 -04001478 rcu_read_unlock();
Hu Tao20cfe882014-04-02 15:13:26 +08001479}
1480
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001481static int memory_try_enable_merging(void *addr, size_t len)
1482{
Marcel Apfelbaum75cc7f02015-02-04 17:43:55 +02001483 if (!machine_mem_merge(current_machine)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001484 /* disabled by the user */
1485 return 0;
1486 }
1487
1488 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1489}
1490
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001491/* Only legal before guest might have detected the memory size: e.g. on
1492 * incoming migration, or right after reset.
1493 *
1494 * As the memory core doesn't know how memory is accessed, it is up to the
1495 * resize callback to update device state and/or add assertions to detect
1496 * misuse, if necessary.
1497 */
1498int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
1499{
1500 RAMBlock *block = find_ram_block(base);
1501
1502 assert(block);
1503
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001504 newsize = HOST_PAGE_ALIGN(newsize);
Michael S. Tsirkin129ddaf2015-02-17 10:15:30 +01001505
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001506 if (block->used_length == newsize) {
1507 return 0;
1508 }
1509
1510 if (!(block->flags & RAM_RESIZEABLE)) {
1511 error_setg_errno(errp, EINVAL,
1512 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1513 " in != 0x" RAM_ADDR_FMT, block->idstr,
1514 newsize, block->used_length);
1515 return -EINVAL;
1516 }
1517
1518 if (block->max_length < newsize) {
1519 error_setg_errno(errp, EINVAL,
1520 "Length too large: %s: 0x" RAM_ADDR_FMT
1521 " > 0x" RAM_ADDR_FMT, block->idstr,
1522 newsize, block->max_length);
1523 return -EINVAL;
1524 }
1525
1526 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1527 block->used_length = newsize;
Paolo Bonzini58d27072015-03-23 11:56:01 +01001528 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1529 DIRTY_CLIENTS_ALL);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001530 memory_region_set_size(block->mr, newsize);
1531 if (block->resized) {
1532 block->resized(block->idstr, newsize, block->host);
1533 }
1534 return 0;
1535}
1536
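/*
 * Illustrative caller sketch (not part of exec.c): a hypothetical
 * device growing a resizeable block, e.g. after incoming migration
 * negotiated a larger size. example_grow_ram and its use of
 * error_report_err() are assumptions for the example; the block must
 * have been created with RAM_RESIZEABLE.
 */
#if 0
static int example_grow_ram(RAMBlock *rb, ram_addr_t newsize)
{
    Error *err = NULL;

    if (qemu_ram_resize(rb->offset, newsize, &err) < 0) {
        error_report_err(err);
        return -1;
    }
    return 0;
}
#endif
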
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001537/* Called with ram_list.mutex held */
1538static void dirty_memory_extend(ram_addr_t old_ram_size,
1539 ram_addr_t new_ram_size)
1540{
1541 ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
1542 DIRTY_MEMORY_BLOCK_SIZE);
1543 ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
1544 DIRTY_MEMORY_BLOCK_SIZE);
1545 int i;
1546
1547 /* Only need to extend if block count increased */
1548 if (new_num_blocks <= old_num_blocks) {
1549 return;
1550 }
1551
1552 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1553 DirtyMemoryBlocks *old_blocks;
1554 DirtyMemoryBlocks *new_blocks;
1555 int j;
1556
1557 old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
1558 new_blocks = g_malloc(sizeof(*new_blocks) +
1559 sizeof(new_blocks->blocks[0]) * new_num_blocks);
1560
1561 if (old_num_blocks) {
1562 memcpy(new_blocks->blocks, old_blocks->blocks,
1563 old_num_blocks * sizeof(old_blocks->blocks[0]));
1564 }
1565
1566 for (j = old_num_blocks; j < new_num_blocks; j++) {
1567 new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
1568 }
1569
1570 atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
1571
1572 if (old_blocks) {
1573 g_free_rcu(old_blocks, rcu);
1574 }
1575 }
1576}
1577
Fam Zheng528f46a2016-03-01 14:18:18 +08001578static void ram_block_add(RAMBlock *new_block, Error **errp)
Avi Kivityc5705a72011-12-20 15:59:12 +02001579{
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001580 RAMBlock *block;
Mike Day0d53d9f2015-01-21 13:45:24 +01001581 RAMBlock *last_block = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001582 ram_addr_t old_ram_size, new_ram_size;
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001583 Error *err = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001584
1585 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001586
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001587 qemu_mutex_lock_ramlist();
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001588 new_block->offset = find_ram_offset(new_block->max_length);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001589
1590 if (!new_block->host) {
1591 if (xen_enabled()) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001592 xen_ram_alloc(new_block->offset, new_block->max_length,
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001593 new_block->mr, &err);
1594 if (err) {
1595 error_propagate(errp, err);
1596 qemu_mutex_unlock_ramlist();
 return;
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001597 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001598 } else {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001599 new_block->host = phys_mem_alloc(new_block->max_length,
Igor Mammedova2b257d2014-10-31 16:38:37 +00001600 &new_block->mr->align);
Markus Armbruster39228252013-07-31 15:11:11 +02001601 if (!new_block->host) {
Hu Taoef701d72014-09-09 13:27:54 +08001602 error_setg_errno(errp, errno,
1603 "cannot set up guest memory '%s'",
1604 memory_region_name(new_block->mr));
1605 qemu_mutex_unlock_ramlist();
 return;
Markus Armbruster39228252013-07-31 15:11:11 +02001606 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001607 memory_try_enable_merging(new_block->host, new_block->max_length);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001608 }
1609 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001610
Li Zhijiandd631692015-07-02 20:18:06 +08001611 new_ram_size = MAX(old_ram_size,
1612 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1613 if (new_ram_size > old_ram_size) {
1614 migration_bitmap_extend(old_ram_size, new_ram_size);
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001615 dirty_memory_extend(old_ram_size, new_ram_size);
Li Zhijiandd631692015-07-02 20:18:06 +08001616 }
Mike Day0d53d9f2015-01-21 13:45:24 +01001617 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1618 * QLIST (which has an RCU-friendly variant) does not have insertion at
1619 * tail, so save the last element in last_block.
1620 */
Mike Day0dc3f442013-09-05 14:41:35 -04001621 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Mike Day0d53d9f2015-01-21 13:45:24 +01001622 last_block = block;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001623 if (block->max_length < new_block->max_length) {
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001624 break;
1625 }
1626 }
1627 if (block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001628 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001629 } else if (last_block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001630 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001631 } else { /* list is empty */
Mike Day0dc3f442013-09-05 14:41:35 -04001632 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001633 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001634 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001635
Mike Day0dc3f442013-09-05 14:41:35 -04001636 /* Write list before version */
1637 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001638 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001639 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001640
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001641 cpu_physical_memory_set_dirty_range(new_block->offset,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001642 new_block->used_length,
1643 DIRTY_CLIENTS_ALL);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001644
Paolo Bonzinia904c912015-01-21 16:18:35 +01001645 if (new_block->host) {
1646 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1647 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1648 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1649 if (kvm_enabled()) {
1650 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1651 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001652 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001653}
1654
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001655#ifdef __linux__
Fam Zheng528f46a2016-03-01 14:18:18 +08001656RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
1657 bool share, const char *mem_path,
1658 Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001659{
1660 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001661 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001662
1663 if (xen_enabled()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001664 error_setg(errp, "-mem-path not supported with Xen");
Fam Zheng528f46a2016-03-01 14:18:18 +08001665 return NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001666 }
1667
1668 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1669 /*
1670 * file_ram_alloc() needs to allocate just like
1671 * phys_mem_alloc, but we haven't bothered to provide
1672 * a hook there.
1673 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001674 error_setg(errp,
1675 "-mem-path not supported with this accelerator");
Fam Zheng528f46a2016-03-01 14:18:18 +08001676 return NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001677 }
1678
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001679 size = HOST_PAGE_ALIGN(size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001680 new_block = g_malloc0(sizeof(*new_block));
1681 new_block->mr = mr;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001682 new_block->used_length = size;
1683 new_block->max_length = size;
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001684 new_block->flags = share ? RAM_SHARED : 0;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001685 new_block->host = file_ram_alloc(new_block, size,
1686 mem_path, errp);
1687 if (!new_block->host) {
1688 g_free(new_block);
Fam Zheng528f46a2016-03-01 14:18:18 +08001689 return NULL;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001690 }
1691
Fam Zheng528f46a2016-03-01 14:18:18 +08001692 ram_block_add(new_block, &local_err);
Hu Taoef701d72014-09-09 13:27:54 +08001693 if (local_err) {
1694 g_free(new_block);
1695 error_propagate(errp, local_err);
Fam Zheng528f46a2016-03-01 14:18:18 +08001696 return NULL;
Hu Taoef701d72014-09-09 13:27:54 +08001697 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001698 return new_block;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001699}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001700#endif
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001701
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001702static
Fam Zheng528f46a2016-03-01 14:18:18 +08001703RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1704 void (*resized)(const char*,
1705 uint64_t length,
1706 void *host),
1707 void *host, bool resizeable,
1708 MemoryRegion *mr, Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001709{
1710 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001711 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001712
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001713 size = HOST_PAGE_ALIGN(size);
1714 max_size = HOST_PAGE_ALIGN(max_size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001715 new_block = g_malloc0(sizeof(*new_block));
1716 new_block->mr = mr;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001717 new_block->resized = resized;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001718 new_block->used_length = size;
1719 new_block->max_length = max_size;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001720 assert(max_size >= size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001721 new_block->fd = -1;
1722 new_block->host = host;
1723 if (host) {
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001724 new_block->flags |= RAM_PREALLOC;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001725 }
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001726 if (resizeable) {
1727 new_block->flags |= RAM_RESIZEABLE;
1728 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001729 ram_block_add(new_block, &local_err);
Hu Taoef701d72014-09-09 13:27:54 +08001730 if (local_err) {
1731 g_free(new_block);
1732 error_propagate(errp, local_err);
Fam Zheng528f46a2016-03-01 14:18:18 +08001733 return NULL;
Hu Taoef701d72014-09-09 13:27:54 +08001734 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001735 return new_block;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001736}
1737
Fam Zheng528f46a2016-03-01 14:18:18 +08001738RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001739 MemoryRegion *mr, Error **errp)
1740{
1741 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1742}
1743
Fam Zheng528f46a2016-03-01 14:18:18 +08001744RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
pbrook94a6b542009-04-11 17:15:54 +00001745{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001746 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1747}
1748
Fam Zheng528f46a2016-03-01 14:18:18 +08001749RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001750 void (*resized)(const char*,
1751 uint64_t length,
1752 void *host),
1753 MemoryRegion *mr, Error **errp)
1754{
1755 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
pbrook94a6b542009-04-11 17:15:54 +00001756}
bellarde9a1ab12007-02-08 23:08:38 +00001757
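/*
 * Illustrative sketch (not part of exec.c): a hypothetical resized()
 * callback paired with qemu_ram_alloc_resizeable(). The name and the
 * body are assumptions; the contract is just the three parameters.
 */
#if 0
static void example_ram_resized(const char *id, uint64_t length, void *host)
{
    /* e.g. refresh firmware tables that describe this region */
    fprintf(stderr, "RAM block %s is now %" PRIu64 " bytes at %p\n",
            id, length, host);
}

/* ...at allocation time: */
ram_block = qemu_ram_alloc_resizeable(size, maxsz, example_ram_resized,
                                      mr, &err);
#endif
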
Paolo Bonzini43771532013-09-09 17:58:40 +02001758static void reclaim_ramblock(RAMBlock *block)
1759{
1760 if (block->flags & RAM_PREALLOC) {
1761 ;
1762 } else if (xen_enabled()) {
1763 xen_invalidate_map_cache_entry(block->host);
1764#ifndef _WIN32
1765 } else if (block->fd >= 0) {
Eduardo Habkost2f3a2bb2015-11-06 20:11:21 -02001766 qemu_ram_munmap(block->host, block->max_length);
Paolo Bonzini43771532013-09-09 17:58:40 +02001767 close(block->fd);
1768#endif
1769 } else {
1770 qemu_anon_ram_free(block->host, block->max_length);
1771 }
1772 g_free(block);
1773}
1774
Fam Zhengf1060c52016-03-01 14:18:22 +08001775void qemu_ram_free(RAMBlock *block)
bellarde9a1ab12007-02-08 23:08:38 +00001776{
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001777 qemu_mutex_lock_ramlist();
Fam Zhengf1060c52016-03-01 14:18:22 +08001778 QLIST_REMOVE_RCU(block, next);
1779 ram_list.mru_block = NULL;
1780 /* Write list before version */
1781 smp_wmb();
1782 ram_list.version++;
1783 call_rcu(block, reclaim_ramblock, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001784 qemu_mutex_unlock_ramlist();
bellarde9a1ab12007-02-08 23:08:38 +00001785}
1786
Huang Yingcd19cfa2011-03-02 08:56:19 +01001787#ifndef _WIN32
1788void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1789{
1790 RAMBlock *block;
1791 ram_addr_t offset;
1792 int flags;
1793 void *area, *vaddr;
1794
Mike Day0dc3f442013-09-05 14:41:35 -04001795 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001796 offset = addr - block->offset;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001797 if (offset < block->max_length) {
Michael S. Tsirkin1240be22014-11-12 11:44:41 +02001798 vaddr = ramblock_ptr(block, offset);
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001799 if (block->flags & RAM_PREALLOC) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001800 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001801 } else if (xen_enabled()) {
1802 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001803 } else {
1804 flags = MAP_FIXED;
Markus Armbruster3435f392013-07-31 15:11:07 +02001805 if (block->fd >= 0) {
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001806 flags |= (block->flags & RAM_SHARED ?
1807 MAP_SHARED : MAP_PRIVATE);
Markus Armbruster3435f392013-07-31 15:11:07 +02001808 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1809 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001810 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001811 /*
1812 * Remap needs to match alloc. Accelerators that
1813 * set phys_mem_alloc never remap. If they did,
1814 * we'd need a remap hook here.
1815 */
1816 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1817
Huang Yingcd19cfa2011-03-02 08:56:19 +01001818 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1819 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1820 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001821 }
1822 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001823 fprintf(stderr, "Could not remap addr: "
1824 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001825 length, addr);
1826 exit(1);
1827 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001828 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001829 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001830 }
Huang Yingcd19cfa2011-03-02 08:56:19 +01001831 }
1832 }
1833}
1834#endif /* !_WIN32 */
1835
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001836int qemu_get_ram_fd(ram_addr_t addr)
1837{
Mike Dayae3a7042013-09-05 14:41:35 -04001838 RAMBlock *block;
1839 int fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001840
Mike Day0dc3f442013-09-05 14:41:35 -04001841 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001842 block = qemu_get_ram_block(addr);
1843 fd = block->fd;
Mike Day0dc3f442013-09-05 14:41:35 -04001844 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001845 return fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001846}
1847
Tetsuya Mukawa56a571d2015-12-21 12:47:34 +09001848void qemu_set_ram_fd(ram_addr_t addr, int fd)
1849{
1850 RAMBlock *block;
1851
1852 rcu_read_lock();
1853 block = qemu_get_ram_block(addr);
1854 block->fd = fd;
1855 rcu_read_unlock();
1856}
1857
Damjan Marion3fd74b82014-06-26 23:01:32 +02001858void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1859{
Mike Dayae3a7042013-09-05 14:41:35 -04001860 RAMBlock *block;
1861 void *ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001862
Mike Day0dc3f442013-09-05 14:41:35 -04001863 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001864 block = qemu_get_ram_block(addr);
1865 ptr = ramblock_ptr(block, 0);
Mike Day0dc3f442013-09-05 14:41:35 -04001866 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001867 return ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001868}
1869
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001870/* Return a host pointer to ram allocated with qemu_ram_alloc.
Mike Dayae3a7042013-09-05 14:41:35 -04001871 * This should not be used for general purpose DMA. Use address_space_map
1872 * or address_space_rw instead. For local memory (e.g. video ram) that the
1873 * device owns, use memory_region_get_ram_ptr.
Mike Day0dc3f442013-09-05 14:41:35 -04001874 *
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001875 * Called within RCU critical section.
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001876 */
Gonglei3655cb92016-02-20 10:35:20 +08001877void *qemu_get_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001878{
Gonglei3655cb92016-02-20 10:35:20 +08001879 RAMBlock *block = ram_block;
1880
1881 if (block == NULL) {
1882 block = qemu_get_ram_block(addr);
1883 }
Mike Dayae3a7042013-09-05 14:41:35 -04001884
1885 if (xen_enabled() && block->host == NULL) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001886 /* We need to check if the requested address is in the RAM
1887 * because we don't want to map the entire memory in QEMU.
1888 * In that case just map until the end of the page.
1889 */
1890 if (block->offset == 0) {
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001891 return xen_map_cache(addr, 0, 0);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001892 }
Mike Dayae3a7042013-09-05 14:41:35 -04001893
1894 block->host = xen_map_cache(block->offset, block->max_length, 1);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001895 }
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001896 return ramblock_ptr(block, addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00001897}
1898
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001899/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
Mike Dayae3a7042013-09-05 14:41:35 -04001900 * but takes a size argument.
Mike Day0dc3f442013-09-05 14:41:35 -04001901 *
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001902 * Called within RCU critical section.
Mike Dayae3a7042013-09-05 14:41:35 -04001903 */
Gonglei3655cb92016-02-20 10:35:20 +08001904static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
1905 hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001906{
Gonglei3655cb92016-02-20 10:35:20 +08001907 RAMBlock *block = ram_block;
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001908 ram_addr_t offset_inside_block;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001909 if (*size == 0) {
1910 return NULL;
1911 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001912
Gonglei3655cb92016-02-20 10:35:20 +08001913 if (block == NULL) {
1914 block = qemu_get_ram_block(addr);
1915 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001916 offset_inside_block = addr - block->offset;
1917 *size = MIN(*size, block->max_length - offset_inside_block);
1918
1919 if (xen_enabled() && block->host == NULL) {
1920 /* We need to check if the requested address is in the RAM
1921 * because we don't want to map the entire memory in QEMU.
1922 * In that case just map the requested area.
1923 */
1924 if (block->offset == 0) {
1925 return xen_map_cache(addr, *size, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001926 }
1927
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001928 block->host = xen_map_cache(block->offset, block->max_length, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001929 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001930
1931 return ramblock_ptr(block, offset_inside_block);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001932}
1933
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001934/*
1935 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1936 * in that RAMBlock.
1937 *
1938 * ptr: Host pointer to look up
1939 * round_offset: If true round the result offset down to a page boundary
1940 * *ram_addr: set to result ram_addr
1941 * *offset: set to result offset within the RAMBlock
1942 *
1943 * Returns: RAMBlock (or NULL if not found)
Mike Dayae3a7042013-09-05 14:41:35 -04001944 *
1945 * By the time this function returns, the returned pointer is not protected
1946 * by RCU anymore. If the caller is not within an RCU critical section and
1947 * does not hold the iothread lock, it must have other means of protecting the
1948 * pointer, such as a reference to the region that includes the incoming
1949 * ram_addr_t.
1950 */
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001951RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
1952 ram_addr_t *ram_addr,
1953 ram_addr_t *offset)
pbrook5579c7f2009-04-11 14:47:08 +00001954{
pbrook94a6b542009-04-11 17:15:54 +00001955 RAMBlock *block;
1956 uint8_t *host = ptr;
1957
Jan Kiszka868bb332011-06-21 22:59:09 +02001958 if (xen_enabled()) {
Mike Day0dc3f442013-09-05 14:41:35 -04001959 rcu_read_lock();
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001960 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001961 block = qemu_get_ram_block(*ram_addr);
1962 if (block) {
1963 *offset = (host - block->host);
1964 }
Mike Day0dc3f442013-09-05 14:41:35 -04001965 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001966 return block;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001967 }
1968
Mike Day0dc3f442013-09-05 14:41:35 -04001969 rcu_read_lock();
1970 block = atomic_rcu_read(&ram_list.mru_block);
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001971 if (block && block->host && host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001972 goto found;
1973 }
1974
Mike Day0dc3f442013-09-05 14:41:35 -04001975 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001976 /* This case happens when the block is not mapped. */
1977 if (block->host == NULL) {
1978 continue;
1979 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001980 if (host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001981 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001982 }
pbrook94a6b542009-04-11 17:15:54 +00001983 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001984
Mike Day0dc3f442013-09-05 14:41:35 -04001985 rcu_read_unlock();
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001986 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001987
1988found:
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001989 *offset = (host - block->host);
1990 if (round_offset) {
1991 *offset &= TARGET_PAGE_MASK;
1992 }
1993 *ram_addr = block->offset + *offset;
Mike Day0dc3f442013-09-05 14:41:35 -04001994 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001995 return block;
1996}
1997
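/*
 * Illustrative sketch (not part of exec.c): translating a host pointer
 * back to its RAMBlock. The variable names are assumptions.
 */
#if 0
    RAMBlock *rb;
    ram_addr_t ram_addr, offset;

    rb = qemu_ram_block_from_host(host_ptr, true, &ram_addr, &offset);
    if (rb) {
        /* offset is page-aligned here because round_offset was true */
    }
#endif
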
Dr. David Alan Gilberte3dd7492015-11-05 18:10:33 +00001998/*
1999 * Finds the named RAMBlock
2000 *
2001 * name: The name of RAMBlock to find
2002 *
2003 * Returns: RAMBlock (or NULL if not found)
2004 */
2005RAMBlock *qemu_ram_block_by_name(const char *name)
2006{
2007 RAMBlock *block;
2008
2009 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
2010 if (!strcmp(name, block->idstr)) {
2011 return block;
2012 }
2013 }
2014
2015 return NULL;
2016}
2017
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00002018/* Some of the softmmu routines need to translate from a host pointer
2019 (typically a TLB entry) back to a ram offset. */
2020MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
2021{
2022 RAMBlock *block;
2023 ram_addr_t offset; /* Not used */
2024
2025 block = qemu_ram_block_from_host(ptr, false, ram_addr, &offset);
2026
2027 if (!block) {
2028 return NULL;
2029 }
2030
2031 return block->mr;
Marcelo Tosattie8902612010-10-11 15:31:19 -03002032}
Alex Williamsonf471a172010-06-11 11:11:42 -06002033
Paolo Bonzini49b24af2015-12-16 10:30:47 +01002034/* Called within RCU critical section. */
Avi Kivitya8170e52012-10-23 12:30:10 +02002035static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002036 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00002037{
Juan Quintela52159192013-10-08 12:44:04 +02002038 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002039 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00002040 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002041 switch (size) {
2042 case 1:
Gonglei3655cb92016-02-20 10:35:20 +08002043 stb_p(qemu_get_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002044 break;
2045 case 2:
Gonglei3655cb92016-02-20 10:35:20 +08002046 stw_p(qemu_get_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002047 break;
2048 case 4:
Gonglei3655cb92016-02-20 10:35:20 +08002049 stl_p(qemu_get_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002050 break;
2051 default:
2052 abort();
2053 }
Paolo Bonzini58d27072015-03-23 11:56:01 +01002054 /* Set both VGA and migration bits for simplicity and to remove
2055 * the notdirty callback faster.
2056 */
2057 cpu_physical_memory_set_dirty_range(ram_addr, size,
2058 DIRTY_CLIENTS_NOCODE);
bellardf23db162005-08-21 19:12:28 +00002059 /* we remove the notdirty callback only if the code has been
2060 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02002061 if (!cpu_physical_memory_is_clean(ram_addr)) {
Peter Crosthwaitebcae01e2015-09-10 22:39:42 -07002062 tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
Andreas Färber4917cf42013-05-27 05:17:50 +02002063 }
bellard1ccde1c2004-02-06 19:46:14 +00002064}
2065
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002066static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
2067 unsigned size, bool is_write)
2068{
2069 return is_write;
2070}
2071
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002072static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002073 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002074 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002075 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00002076};
2077
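/*
 * Illustration of the notdirty path above: the first write to a
 * not-yet-dirty page is routed through notdirty_mem_write(), which
 * invalidates any TBs generated from that page, performs the store,
 * and marks the page dirty for the VGA and migration clients; once the
 * page is dirty, tlb_set_dirty() retargets the TLB entry so later
 * writes go straight to RAM instead of through this callback.
 */
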
pbrook0f459d12008-06-09 00:20:13 +00002078/* Generate a debug exception if a watchpoint has been hit. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002079static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
pbrook0f459d12008-06-09 00:20:13 +00002080{
Andreas Färber93afead2013-08-26 03:41:01 +02002081 CPUState *cpu = current_cpu;
Sergey Fedorov568496c2016-02-11 11:17:32 +00002082 CPUClass *cc = CPU_GET_CLASS(cpu);
Andreas Färber93afead2013-08-26 03:41:01 +02002083 CPUArchState *env = cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00002084 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00002085 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00002086 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00002087 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00002088
Andreas Färberff4700b2013-08-26 18:23:18 +02002089 if (cpu->watchpoint_hit) {
aliguori06d55cc2008-11-18 20:24:06 +00002090 /* We re-entered the check after replacing the TB. Now raise
2091 * the debug interrupt so that it will trigger after the
2092 * current instruction. */
Andreas Färber93afead2013-08-26 03:41:01 +02002093 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00002094 return;
2095 }
Andreas Färber93afead2013-08-26 03:41:01 +02002096 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Andreas Färberff4700b2013-08-26 18:23:18 +02002097 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Peter Maydell05068c02014-09-12 14:06:48 +01002098 if (cpu_watchpoint_address_matches(wp, vaddr, len)
2099 && (wp->flags & flags)) {
Peter Maydell08225672014-09-12 14:06:48 +01002100 if (flags == BP_MEM_READ) {
2101 wp->flags |= BP_WATCHPOINT_HIT_READ;
2102 } else {
2103 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
2104 }
2105 wp->hitaddr = vaddr;
Peter Maydell66b9b432015-04-26 16:49:24 +01002106 wp->hitattrs = attrs;
Andreas Färberff4700b2013-08-26 18:23:18 +02002107 if (!cpu->watchpoint_hit) {
Sergey Fedorov568496c2016-02-11 11:17:32 +00002108 if (wp->flags & BP_CPU &&
2109 !cc->debug_check_watchpoint(cpu, wp)) {
2110 wp->flags &= ~BP_WATCHPOINT_HIT;
2111 continue;
2112 }
Andreas Färberff4700b2013-08-26 18:23:18 +02002113 cpu->watchpoint_hit = wp;
Andreas Färber239c51a2013-09-01 17:12:23 +02002114 tb_check_watchpoint(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002115 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
Andreas Färber27103422013-08-26 08:31:06 +02002116 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +02002117 cpu_loop_exit(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002118 } else {
2119 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
Andreas Färber648f0342013-09-01 17:43:17 +02002120 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
Andreas Färber0ea8cb82013-09-03 02:12:23 +02002121 cpu_resume_from_signal(cpu, NULL);
aliguori6e140f22008-11-18 20:37:55 +00002122 }
aliguori06d55cc2008-11-18 20:24:06 +00002123 }
aliguori6e140f22008-11-18 20:37:55 +00002124 } else {
2125 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00002126 }
2127 }
2128}
2129
pbrook6658ffb2007-03-16 23:58:11 +00002130/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2131 so these check for a hit then pass through to the normal out-of-line
2132 phys routines. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002133static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
2134 unsigned size, MemTxAttrs attrs)
pbrook6658ffb2007-03-16 23:58:11 +00002135{
Peter Maydell66b9b432015-04-26 16:49:24 +01002136 MemTxResult res;
2137 uint64_t data;
Peter Maydell79ed0412016-01-21 14:15:06 +00002138 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2139 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
pbrook6658ffb2007-03-16 23:58:11 +00002140
Peter Maydell66b9b432015-04-26 16:49:24 +01002141 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
Avi Kivity1ec9b902012-01-02 12:47:48 +02002142 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04002143 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002144 data = address_space_ldub(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002145 break;
2146 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002147 data = address_space_lduw(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002148 break;
2149 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002150 data = address_space_ldl(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002151 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02002152 default: abort();
2153 }
Peter Maydell66b9b432015-04-26 16:49:24 +01002154 *pdata = data;
2155 return res;
2156}
2157
2158static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
2159 uint64_t val, unsigned size,
2160 MemTxAttrs attrs)
2161{
2162 MemTxResult res;
Peter Maydell79ed0412016-01-21 14:15:06 +00002163 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2164 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
Peter Maydell66b9b432015-04-26 16:49:24 +01002165
2166 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
2167 switch (size) {
2168 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002169 address_space_stb(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002170 break;
2171 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002172 address_space_stw(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002173 break;
2174 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002175 address_space_stl(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002176 break;
2177 default: abort();
2178 }
2179 return res;
pbrook6658ffb2007-03-16 23:58:11 +00002180}
2181
Avi Kivity1ec9b902012-01-02 12:47:48 +02002182static const MemoryRegionOps watch_mem_ops = {
Peter Maydell66b9b432015-04-26 16:49:24 +01002183 .read_with_attrs = watch_mem_read,
2184 .write_with_attrs = watch_mem_write,
Avi Kivity1ec9b902012-01-02 12:47:48 +02002185 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00002186};
pbrook6658ffb2007-03-16 23:58:11 +00002187
Peter Maydellf25a49e2015-04-26 16:49:24 +01002188static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2189 unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002190{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002191 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002192 uint8_t buf[8];
Peter Maydell5c9eb022015-04-26 16:49:24 +01002193 MemTxResult res;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002194
blueswir1db7b5422007-05-26 17:36:03 +00002195#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002196 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002197 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00002198#endif
Peter Maydell5c9eb022015-04-26 16:49:24 +01002199 res = address_space_read(subpage->as, addr + subpage->base,
2200 attrs, buf, len);
2201 if (res) {
2202 return res;
Peter Maydellf25a49e2015-04-26 16:49:24 +01002203 }
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002204 switch (len) {
2205 case 1:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002206 *data = ldub_p(buf);
2207 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002208 case 2:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002209 *data = lduw_p(buf);
2210 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002211 case 4:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002212 *data = ldl_p(buf);
2213 return MEMTX_OK;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002214 case 8:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002215 *data = ldq_p(buf);
2216 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002217 default:
2218 abort();
2219 }
blueswir1db7b5422007-05-26 17:36:03 +00002220}
2221
Peter Maydellf25a49e2015-04-26 16:49:24 +01002222static MemTxResult subpage_write(void *opaque, hwaddr addr,
2223 uint64_t value, unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002224{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002225 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002226 uint8_t buf[8];
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002227
blueswir1db7b5422007-05-26 17:36:03 +00002228#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    case 8:
        stq_p(buf, value);
        break;
    default:
        abort();
    }
    return address_space_write(subpage->as, addr + subpage->base,
                               attrs, buf, len);
}

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      len, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read_with_attrs = subpage_read,
    .write_with_attrs = subpage_write,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) {
        return -1;
    }
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          NULL, TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}
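
/* Illustrative sketch (not part of the upstream file): how the helpers
 * above carve one guest page into differently-backed halves. The section
 * numbers 5 and 6 are made up; real callers use values returned by
 * phys_section_add().
 */
static void __attribute__((unused)) subpage_usage_sketch(AddressSpace *as)
{
    subpage_t *sp = subpage_init(as, 0 /* page-aligned base */);

    /* First half of the page -> section 5, second half -> section 6. */
    subpage_register(sp, 0, TARGET_PAGE_SIZE / 2 - 1, 5);
    subpage_register(sp, TARGET_PAGE_SIZE / 2, TARGET_PAGE_SIZE - 1, 6);
}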

static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
                              MemoryRegion *mr)
{
    assert(as);
    MemoryRegionSection section = {
        .address_space = as,
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(map, &section);
}

MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
{
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
    AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
    MemoryRegionSection *sections = d->map.sections;

    return sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          NULL, UINT64_MAX);
}

static void mem_begin(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
    uint16_t n;

    n = dummy_section(&d->map, as, &io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&d->map, as, &io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&d->map, as, &io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&d->map, as, &io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
    d->as = as;
    as->next_dispatch = d;
}

static void address_space_dispatch_free(AddressSpaceDispatch *d)
{
    phys_sections_free(&d->map);
    g_free(d);
}

static void mem_commit(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    phys_page_compact_all(next, next->map.nodes_nb);

    atomic_rcu_set(&as->dispatch, next);
    if (cur) {
        call_rcu(cur, address_space_dispatch_free, rcu);
    }
}

static void tcg_commit(MemoryListener *listener)
{
    CPUAddressSpace *cpuas;
    AddressSpaceDispatch *d;

    /* Since each CPU stores ram addresses in its TLB cache, we must
     * reset the modified entries.
     */
    cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
    cpu_reloading_memory_map();
    /* The CPU and TLB are protected by the iothread lock.
     * We reload the dispatch pointer now because cpu_reloading_memory_map()
     * may have split the RCU critical section.
     */
    d = atomic_rcu_read(&cpuas->as->dispatch);
    cpuas->memory_dispatch = d;
    tlb_flush(cpuas->cpu, 1);
}

void address_space_init_dispatch(AddressSpace *as)
{
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .begin = mem_begin,
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    memory_listener_register(&as->dispatch_listener, as);
}

void address_space_unregister(AddressSpace *as)
{
    memory_listener_unregister(&as->dispatch_listener);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    atomic_rcu_set(&as->dispatch, NULL);
    if (d) {
        call_rcu(d, address_space_dispatch_free, rcu);
    }
}

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
                          65536);
    address_space_init(&address_space_io, system_io, "I/O");
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}
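
/* Illustrative sketch (not part of the upstream file): how board code
 * typically hangs RAM off the tree returned by get_system_memory(). The
 * region name and size are placeholders, and &error_fatal assumes
 * qapi/error.h is available.
 */
static void __attribute__((unused)) system_memory_usage_sketch(void)
{
    MemoryRegion *ram = g_new(MemoryRegion, 1);

    memory_region_init_ram(ram, NULL, "sketch.ram", 64 * 1024 * 1024,
                           &error_fatal);
    memory_region_add_subregion(get_system_memory(), 0, ram);
}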

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID)) {
            return -1;
        }
        if (is_write) {
            if (!(flags & PAGE_WRITE)) {
                return -1;
            }
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0))) {
                return -1;
            }
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ)) {
                return -1;
            }
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1))) {
                return -1;
            }
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
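
/* Illustrative sketch (not part of the upstream file): a debugger-style
 * read of guest memory in the user-only build. "cpu" and "guest_addr"
 * come from the caller; sizeof(buf) bytes are fetched.
 */
static int __attribute__((unused)) debug_read_sketch(CPUState *cpu,
                                                     target_ulong guest_addr)
{
    uint8_t buf[4];

    /* Returns -1 if any page in the range is unmapped or not readable. */
    return cpu_memory_rw_debug(cpu, guest_addr, buf, sizeof(buf), 0);
}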

#else

static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
                                     hwaddr length)
{
    uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    /* No early return if dirty_log_mask is or becomes 0, because
     * cpu_physical_memory_set_dirty_range will still call
     * xen_modified_memory.
     */
    if (dirty_log_mask) {
        dirty_log_mask =
            cpu_physical_memory_range_includes_clean(addr, length,
                                                     dirty_log_mask);
    }
    if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_range(addr, addr + length);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
    }
    cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
}

static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
     * otherwise specified.
     */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address. */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum. */
    if (l > access_size_max) {
        l = access_size_max;
    }
    l = pow2floor(l);

    return l;
}
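
/* Worked example (illustrative, not part of the upstream file): for a
 * region that leaves valid.max_access_size at 0 (treated as 4) and does
 * not set impl.unaligned, an 8-byte access at an address aligned only to
 * 2 bytes (addr & -addr == 2) is clamped to 4 by the default, then to 2
 * by the alignment bound, so memory_access_size() returns 2.
 */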

static bool prepare_mmio_access(MemoryRegion *mr)
{
    bool unlocked = !qemu_mutex_iothread_locked();
    bool release_lock = false;

    if (unlocked && mr->global_locking) {
        qemu_mutex_lock_iothread();
        unlocked = false;
        release_lock = true;
    }
    if (mr->flush_coalesced_mmio) {
        if (unlocked) {
            qemu_mutex_lock_iothread();
        }
        qemu_flush_coalesced_mmio_buffer();
        if (unlocked) {
            qemu_mutex_unlock_iothread();
        }
    }

    return release_lock;
}

/* Called within RCU critical section.  */
static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
                                                MemTxAttrs attrs,
                                                const uint8_t *buf,
                                                int len, hwaddr addr1,
                                                hwaddr l, MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, true)) {
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            /* XXX: could force current_cpu to NULL to avoid
               potential bugs */
            switch (l) {
            case 8:
                /* 64 bit write access */
                val = ldq_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 8,
                                                       attrs);
                break;
            case 4:
                /* 32 bit write access */
                val = ldl_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 4,
                                                       attrs);
                break;
            case 2:
                /* 16 bit write access */
                val = lduw_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 2,
                                                       attrs);
                break;
            case 1:
                /* 8 bit write access */
                val = ldub_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 1,
                                                       attrs);
                break;
            default:
                abort();
            }
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* RAM case */
            ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(mr, addr1, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
    }

    return result;
}

MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                                const uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
        result = address_space_write_continue(as, addr, attrs, buf, len,
                                              addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}

/* Called within RCU critical section.  */
MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
                                        MemTxAttrs attrs, uint8_t *buf,
                                        int len, hwaddr addr1, hwaddr l,
                                        MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, false)) {
            /* I/O case */
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            switch (l) {
            case 8:
                /* 64 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 8,
                                                      attrs);
                stq_p(buf, val);
                break;
            case 4:
                /* 32 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 4,
                                                      attrs);
                stl_p(buf, val);
                break;
            case 2:
                /* 16 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 2,
                                                      attrs);
                stw_p(buf, val);
                break;
            case 1:
                /* 8 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 1,
                                                      attrs);
                stb_p(buf, val);
                break;
            default:
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_get_ram_ptr(mr->ram_block,
                                   memory_region_get_ram_addr(mr) + addr1);
            memcpy(buf, ptr, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
    }

    return result;
}

MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs, uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
        result = address_space_read_continue(as, addr, attrs, buf, len,
                                             addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}

MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                             uint8_t *buf, int len, bool is_write)
{
    if (is_write) {
        return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
    } else {
        return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
    }
}
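
/* Illustrative sketch (not part of the upstream file): a 32-bit
 * guest-physical write through the generic entry point, with the
 * transaction result checked. The address 0x1000 is arbitrary.
 */
static void __attribute__((unused)) address_space_rw_sketch(void)
{
    uint32_t val = cpu_to_le32(0xdeadbeef);
    MemTxResult res;

    res = address_space_rw(&address_space_memory, 0x1000,
                           MEMTXATTRS_UNSPECIFIED, (uint8_t *)&val,
                           sizeof(val), true);
    if (res != MEMTX_OK) {
        /* Some part of the access faulted or decoded to nothing. */
    }
}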

void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}

enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            l = memory_access_size(mr, l, addr1);
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();
}

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}

void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }

    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}
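
/* Illustrative sketch (not part of the upstream file): the pattern a
 * firmware loader follows -- copy a blob into ROM, then make the host
 * icache coherent for KVM/Xen. "blob" and "blob_size" are placeholders,
 * and loading at address 0 is arbitrary.
 */
static void __attribute__((unused)) rom_load_sketch(const uint8_t *blob,
                                                    int blob_size)
{
    cpu_physical_memory_write_rom(&address_space_memory, 0x0, blob, blob_size);
    cpu_flush_icache_range(0x0, blob_size);
}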

typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
    bool in_use;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    QEMUBH *bh;
    QLIST_ENTRY(MapClient) link;
} MapClient;

QemuMutex map_client_list_lock;
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

static void cpu_unregister_map_client_do(MapClient *client)
{
    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients_locked(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        qemu_bh_schedule(client->bh);
        cpu_unregister_map_client_do(client);
    }
}

void cpu_register_map_client(QEMUBH *bh)
{
    MapClient *client = g_malloc(sizeof(*client));

    qemu_mutex_lock(&map_client_list_lock);
    client->bh = bh;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    if (!atomic_read(&bounce.in_use)) {
        cpu_notify_map_clients_locked();
    }
    qemu_mutex_unlock(&map_client_list_lock);
}
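
/* Illustrative sketch (not part of the upstream file): a device whose
 * address_space_map() call failed registers a bottom half so it can retry
 * once the single bounce buffer is free again. The callback body is a
 * placeholder.
 */
static void __attribute__((unused)) map_retry_bh(void *opaque)
{
    /* Retry address_space_map() here; re-register on another failure. */
}

static void __attribute__((unused)) map_retry_sketch(void)
{
    QEMUBH *bh = qemu_bh_new(map_retry_bh, NULL);

    cpu_register_map_client(bh);
}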

void cpu_exec_init_all(void)
{
    qemu_mutex_init(&ram_list.mutex);
    io_mem_init();
    memory_map_init();
    qemu_mutex_init(&map_client_list_lock);
}

void cpu_unregister_map_client(QEMUBH *bh)
{
    MapClient *client;

    qemu_mutex_lock(&map_client_list_lock);
    QLIST_FOREACH(client, &map_client_list, link) {
        if (client->bh == bh) {
            cpu_unregister_map_client_do(client);
            break;
        }
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

static void cpu_notify_map_clients(void)
{
    qemu_mutex_lock(&map_client_list_lock);
    cpu_notify_map_clients_locked();
    qemu_mutex_unlock(&map_client_list_lock);
}

bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len,
                                bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                /* Drop the RCU read lock on the early-out path too. */
                rcu_read_unlock();
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    rcu_read_unlock();
    return true;
}
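
/* Illustrative sketch (not part of the upstream file): probing whether an
 * 8-byte read would be accepted before committing to it, as DMA helper
 * code does. "addr" comes from the caller.
 */
static bool __attribute__((unused)) access_probe_sketch(AddressSpace *as,
                                                        hwaddr addr)
{
    return address_space_access_valid(as, addr, 8, false);
}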

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;
    void *ptr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &l, is_write);

    if (!memory_access_is_direct(mr, is_write)) {
        if (atomic_xchg(&bounce.in_use, true)) {
            rcu_read_unlock();
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                               bounce.buffer, l);
        }

        rcu_read_unlock();
        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    *plen = done;
    ptr = qemu_ram_ptr_length(mr->ram_block, raddr + base, plen);
    rcu_read_unlock();

    return ptr;
}

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write,
                               access_len);
}
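
/* Illustrative sketch (not part of the upstream file): the map/use/unmap
 * discipline for zero-copy DMA. Gives up silently when the mapping is
 * refused; a real caller would queue a map client and retry.
 */
static void __attribute__((unused)) dma_map_sketch(hwaddr addr, hwaddr len)
{
    hwaddr plen = len;
    void *p = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);

    if (!p) {
        return;
    }
    memset(p, 0, plen);                   /* stand-in for the device DMA */
    cpu_physical_memory_unmap(p, plen, 1, plen);
}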

/* warning: addr must be aligned */
static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(mr->ram_block,
                               (memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
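
/* Illustrative sketch (not part of the upstream file): an explicit-endian
 * 32-bit load with the transaction result checked; 0x2000 is an arbitrary
 * 4-byte-aligned address and the error value is caller-defined.
 */
static uint32_t __attribute__((unused)) ldl_sketch(AddressSpace *as)
{
    MemTxResult res;
    uint32_t v = address_space_ldl_le(as, 0x2000, MEMTXATTRS_UNSPECIFIED,
                                      &res);

    return (res == MEMTX_OK) ? v : 0;
}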
3160
bellard84b7b8e2005-11-28 21:19:04 +00003161/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003162static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
3163 MemTxAttrs attrs,
3164 MemTxResult *result,
3165 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00003166{
bellard84b7b8e2005-11-28 21:19:04 +00003167 uint8_t *ptr;
3168 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003169 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003170 hwaddr l = 8;
3171 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003172 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003173 bool release_lock = false;
bellard84b7b8e2005-11-28 21:19:04 +00003174
Paolo Bonzini41063e12015-03-18 14:21:43 +01003175 rcu_read_lock();
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003176 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003177 false);
3178 if (l < 8 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003179 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003180
bellard84b7b8e2005-11-28 21:19:04 +00003181 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003182 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
Paolo Bonzini968a5622013-05-24 17:58:37 +02003183#if defined(TARGET_WORDS_BIGENDIAN)
3184 if (endian == DEVICE_LITTLE_ENDIAN) {
3185 val = bswap64(val);
3186 }
3187#else
3188 if (endian == DEVICE_BIG_ENDIAN) {
3189 val = bswap64(val);
3190 }
3191#endif
bellard84b7b8e2005-11-28 21:19:04 +00003192 } else {
3193 /* RAM case */
Gonglei3655cb92016-02-20 10:35:20 +08003194 ptr = qemu_get_ram_ptr(mr->ram_block,
3195 (memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003196 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003197 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003198 switch (endian) {
3199 case DEVICE_LITTLE_ENDIAN:
3200 val = ldq_le_p(ptr);
3201 break;
3202 case DEVICE_BIG_ENDIAN:
3203 val = ldq_be_p(ptr);
3204 break;
3205 default:
3206 val = ldq_p(ptr);
3207 break;
3208 }
Peter Maydell50013112015-04-26 16:49:24 +01003209 r = MEMTX_OK;
3210 }
3211 if (result) {
3212 *result = r;
bellard84b7b8e2005-11-28 21:19:04 +00003213 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003214 if (release_lock) {
3215 qemu_mutex_unlock_iothread();
3216 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003217 rcu_read_unlock();
bellard84b7b8e2005-11-28 21:19:04 +00003218 return val;
3219}
3220
Peter Maydell50013112015-04-26 16:49:24 +01003221uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
3222 MemTxAttrs attrs, MemTxResult *result)
3223{
3224 return address_space_ldq_internal(as, addr, attrs, result,
3225 DEVICE_NATIVE_ENDIAN);
3226}
3227
3228uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
3229 MemTxAttrs attrs, MemTxResult *result)
3230{
3231 return address_space_ldq_internal(as, addr, attrs, result,
3232 DEVICE_LITTLE_ENDIAN);
3233}
3234
3235uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
3236 MemTxAttrs attrs, MemTxResult *result)
3237{
3238 return address_space_ldq_internal(as, addr, attrs, result,
3239 DEVICE_BIG_ENDIAN);
3240}
3241
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003242uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003243{
Peter Maydell50013112015-04-26 16:49:24 +01003244 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003245}
3246
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003247uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003248{
Peter Maydell50013112015-04-26 16:49:24 +01003249 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003250}
3251
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003252uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003253{
Peter Maydell50013112015-04-26 16:49:24 +01003254 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003255}
3256
bellardaab33092005-10-30 20:48:42 +00003257/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003258uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
3259 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003260{
3261 uint8_t val;
Peter Maydell50013112015-04-26 16:49:24 +01003262 MemTxResult r;
3263
3264 r = address_space_rw(as, addr, attrs, &val, 1, 0);
3265 if (result) {
3266 *result = r;
3267 }
bellardaab33092005-10-30 20:48:42 +00003268 return val;
3269}
3270
Peter Maydell50013112015-04-26 16:49:24 +01003271uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
3272{
3273 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3274}
3275
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003276/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003277static inline uint32_t address_space_lduw_internal(AddressSpace *as,
3278 hwaddr addr,
3279 MemTxAttrs attrs,
3280 MemTxResult *result,
3281 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003282{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003283 uint8_t *ptr;
3284 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003285 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003286 hwaddr l = 2;
3287 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003288 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003289 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003290
Paolo Bonzini41063e12015-03-18 14:21:43 +01003291 rcu_read_lock();
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003292 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003293 false);
3294 if (l < 2 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003295 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003296
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003297 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003298 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003299#if defined(TARGET_WORDS_BIGENDIAN)
3300 if (endian == DEVICE_LITTLE_ENDIAN) {
3301 val = bswap16(val);
3302 }
3303#else
3304 if (endian == DEVICE_BIG_ENDIAN) {
3305 val = bswap16(val);
3306 }
3307#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003308 } else {
3309 /* RAM case */
Gonglei3655cb92016-02-20 10:35:20 +08003310 ptr = qemu_get_ram_ptr(mr->ram_block,
3311 (memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003312 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003313 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003314 switch (endian) {
3315 case DEVICE_LITTLE_ENDIAN:
3316 val = lduw_le_p(ptr);
3317 break;
3318 case DEVICE_BIG_ENDIAN:
3319 val = lduw_be_p(ptr);
3320 break;
3321 default:
3322 val = lduw_p(ptr);
3323 break;
3324 }
Peter Maydell50013112015-04-26 16:49:24 +01003325 r = MEMTX_OK;
3326 }
3327 if (result) {
3328 *result = r;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003329 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003330 if (release_lock) {
3331 qemu_mutex_unlock_iothread();
3332 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003333 rcu_read_unlock();
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003334 return val;
bellardaab33092005-10-30 20:48:42 +00003335}
3336
Peter Maydell50013112015-04-26 16:49:24 +01003337uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
3338 MemTxAttrs attrs, MemTxResult *result)
3339{
3340 return address_space_lduw_internal(as, addr, attrs, result,
3341 DEVICE_NATIVE_ENDIAN);
3342}
3343
3344uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
3345 MemTxAttrs attrs, MemTxResult *result)
3346{
3347 return address_space_lduw_internal(as, addr, attrs, result,
3348 DEVICE_LITTLE_ENDIAN);
3349}
3350
3351uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
3352 MemTxAttrs attrs, MemTxResult *result)
3353{
3354 return address_space_lduw_internal(as, addr, attrs, result,
3355 DEVICE_BIG_ENDIAN);
3356}
3357
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003358uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003359{
Peter Maydell50013112015-04-26 16:49:24 +01003360 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003361}
3362
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003363uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003364{
Peter Maydell50013112015-04-26 16:49:24 +01003365 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003366}
3367
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003368uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003369{
Peter Maydell50013112015-04-26 16:49:24 +01003370 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003371}
3372
bellard8df1cd02005-01-28 22:37:22 +00003373/* warning: addr must be aligned. The ram page is not masked as dirty
3374 and the code inside is not invalidated. It is useful if the dirty
3375 bits are used to track modified PTEs */
Peter Maydell50013112015-04-26 16:49:24 +01003376void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3377 MemTxAttrs attrs, MemTxResult *result)
bellard8df1cd02005-01-28 22:37:22 +00003378{
bellard8df1cd02005-01-28 22:37:22 +00003379 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003380 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003381 hwaddr l = 4;
3382 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003383 MemTxResult r;
Paolo Bonzini845b6212015-03-23 11:45:53 +01003384 uint8_t dirty_log_mask;
Jan Kiszka4840f102015-06-18 18:47:22 +02003385 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003386
Paolo Bonzini41063e12015-03-18 14:21:43 +01003387 rcu_read_lock();
Edgar E. Iglesias2198a122013-11-28 10:13:41 +01003388 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003389 true);
3390 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003391 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003392
Peter Maydell50013112015-04-26 16:49:24 +01003393 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003394 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003395 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
Gonglei3655cb92016-02-20 10:35:20 +08003396 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
bellard8df1cd02005-01-28 22:37:22 +00003397 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003398
Paolo Bonzini845b6212015-03-23 11:45:53 +01003399 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3400 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
Paolo Bonzini58d27072015-03-23 11:56:01 +01003401 cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
Peter Maydell50013112015-04-26 16:49:24 +01003402 r = MEMTX_OK;
3403 }
3404 if (result) {
3405 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003406 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003407 if (release_lock) {
3408 qemu_mutex_unlock_iothread();
3409 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003410 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003411}
3412
Peter Maydell50013112015-04-26 16:49:24 +01003413void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3414{
3415 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3416}
3417
bellard8df1cd02005-01-28 22:37:22 +00003418/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003419static inline void address_space_stl_internal(AddressSpace *as,
3420 hwaddr addr, uint32_t val,
3421 MemTxAttrs attrs,
3422 MemTxResult *result,
3423 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003424{
bellard8df1cd02005-01-28 22:37:22 +00003425 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003426 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003427 hwaddr l = 4;
3428 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003429 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003430 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003431
Paolo Bonzini41063e12015-03-18 14:21:43 +01003432 rcu_read_lock();
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003433 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003434 true);
3435 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003436 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003437
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003438#if defined(TARGET_WORDS_BIGENDIAN)
3439 if (endian == DEVICE_LITTLE_ENDIAN) {
3440 val = bswap32(val);
3441 }
3442#else
3443 if (endian == DEVICE_BIG_ENDIAN) {
3444 val = bswap32(val);
3445 }
3446#endif
Peter Maydell50013112015-04-26 16:49:24 +01003447 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003448 } else {
bellard8df1cd02005-01-28 22:37:22 +00003449 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003450 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
Gonglei3655cb92016-02-20 10:35:20 +08003451 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003452 switch (endian) {
3453 case DEVICE_LITTLE_ENDIAN:
3454 stl_le_p(ptr, val);
3455 break;
3456 case DEVICE_BIG_ENDIAN:
3457 stl_be_p(ptr, val);
3458 break;
3459 default:
3460 stl_p(ptr, val);
3461 break;
3462 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003463 invalidate_and_set_dirty(mr, addr1, 4);
Peter Maydell50013112015-04-26 16:49:24 +01003464 r = MEMTX_OK;
bellard8df1cd02005-01-28 22:37:22 +00003465 }
Peter Maydell50013112015-04-26 16:49:24 +01003466 if (result) {
3467 *result = r;
3468 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003469 if (release_lock) {
3470 qemu_mutex_unlock_iothread();
3471 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003472 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003473}
3474
3475void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3476 MemTxAttrs attrs, MemTxResult *result)
3477{
3478 address_space_stl_internal(as, addr, val, attrs, result,
3479 DEVICE_NATIVE_ENDIAN);
3480}
3481
3482void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3483 MemTxAttrs attrs, MemTxResult *result)
3484{
3485 address_space_stl_internal(as, addr, val, attrs, result,
3486 DEVICE_LITTLE_ENDIAN);
3487}
3488
3489void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3490 MemTxAttrs attrs, MemTxResult *result)
3491{
3492 address_space_stl_internal(as, addr, val, attrs, result,
3493 DEVICE_BIG_ENDIAN);
bellard8df1cd02005-01-28 22:37:22 +00003494}
3495
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003496void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003497{
Peter Maydell50013112015-04-26 16:49:24 +01003498 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003499}
3500
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003501void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003502{
Peter Maydell50013112015-04-26 16:49:24 +01003503 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003504}
3505
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003506void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003507{
Peter Maydell50013112015-04-26 16:49:24 +01003508 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003509}
3510
bellardaab33092005-10-30 20:48:42 +00003511/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003512void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3513 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003514{
3515 uint8_t v = val;
Peter Maydell50013112015-04-26 16:49:24 +01003516 MemTxResult r;
3517
3518 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3519 if (result) {
3520 *result = r;
3521 }
3522}
3523
3524void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3525{
3526 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003527}

/* warning: addr must be aligned */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
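
/*
 * Note that the st*_phys() convenience wrappers are fire-and-forget: they
 * pass MEMTXATTRS_UNSPECIFIED and a NULL result pointer, so transaction
 * failures are silently dropped.  Callers that must detect errors should
 * use the address_space_st*() form directly, e.g. (illustrative):
 *
 *     MemTxResult res;
 *     address_space_stw_be(as, addr, 0xbeef, MEMTXATTRS_UNSPECIFIED, &res);
 */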

/* XXX: optimize */
void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = tswap64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_le64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_be64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}
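
/*
 * The 64-bit helpers above byte-swap the value into a stack copy
 * (tswap64 for target-native order, cpu_to_le64/cpu_to_be64 for a fixed
 * order) and hand it to address_space_rw() as an 8-byte buffer; whether
 * a device then sees one 8-byte access or several narrower ones depends
 * on the access sizes the target region supports.
 */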

void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
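
/*
 * Choosing a variant (illustrative): stq_phys() stores in the guest's
 * native byte order, while stq_le_phys()/stq_be_phys() pin the order
 * regardless of the target; the latter is the right choice for data
 * whose layout is fixed by a device or firmware specification:
 *
 *     stq_le_phys(as, desc_addr, next);   // spec says little-endian
 */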

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        int asidx;
        MemTxAttrs attrs;

        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
        asidx = cpu_asidx_from_attrs(cpu, attrs);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
                                          phys_addr, buf, l);
        } else {
            address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
                             MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
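
/*
 * Usage sketch (hypothetical, in the style of the gdbstub): read four
 * bytes of guest *virtual* memory via the debug page-table walk rather
 * than the TLB.  Returns 0 on success, -1 if the page is unmapped; the
 * bytes land in the buffer exactly as the guest would see them.
 */
static inline int example_debug_read_u32(CPUState *cpu, target_ulong vaddr,
                                         uint32_t *out)
{
    return cpu_memory_rw_debug(cpu, vaddr, (uint8_t *)out, 4,
                               0 /* is_write */);
}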

/*
 * Allows code that needs to deal with migration bitmaps etc. to still be
 * built target-independent.
 */
size_t qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}
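
/*
 * Usage sketch (illustrative): target-independent code can derive the
 * page size and mask from the bit count without ever seeing
 * TARGET_PAGE_BITS.
 */
static inline size_t example_target_page_size(void)
{
    return (size_t)1 << qemu_target_page_bits();
}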

#endif

/*
 * A helper function for the _utterly broken_ virtio device model to find
 * out if it's running on a big-endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
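
/*
 * Illustrative use (legacy virtio is guest-endian, so per-target device
 * code can pick the conversion at run time; the helper name is made up):
 *
 *     uint16_t to_guest16(uint16_t v)
 *     {
 *         return target_words_bigendian() ? cpu_to_be16(v)
 *                                         : cpu_to_le16(v);
 *     }
 */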

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}
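
/*
 * Usage sketch (hypothetical caller): when dumping guest memory it can be
 * worth skipping device-backed pages, since reading MMIO may trigger side
 * effects in the emulated hardware.
 */
static inline bool example_page_is_dumpable(hwaddr paddr)
{
    return !cpu_physical_memory_is_io(paddr);
}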

int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);
        if (ret) {
            break;
        }
    }
    rcu_read_unlock();
    return ret;
}
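
/*
 * Illustrative RAMBlockIterFunc (a sketch, not part of the original
 * file): sums the used length of every RAM block.  The parameter order
 * matches the func() invocation above.
 *
 *     uint64_t total = 0;
 *     qemu_ram_foreach_block(example_sum_ram_cb, &total);
 */
static int example_sum_ram_cb(const char *block_name, void *host_addr,
                              ram_addr_t offset, ram_addr_t length,
                              void *opaque)
{
    uint64_t *total = opaque;

    *total += length;
    return 0;   /* non-zero would stop the iteration early */
}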
#endif