/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#ifndef _WIN32
#include <sys/mman.h>
#endif

#include "qemu/cutils.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "hw/qdev-core.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#include "hw/xen/xen.h"
#endif
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "hw/hw.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "exec/log.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
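
/* Illustrative sketch (not part of the original file): a typical read-side
 * walk of ram_list under RCU, matching the locking rule described above.
 * The total_bytes accounting is hypothetical.
 *
 *     RAMBlock *block;
 *     uint64_t total_bytes = 0;
 *
 *     rcu_read_lock();
 *     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
 *         total_bytes += block->used_length;
 *     }
 *     rcu_read_unlock();
 *
 * Writers, by contrast, must take the ramlist mutex before modifying the
 * block list.
 */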

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];
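
/* Worked example (illustrative): with a 4 KiB target page, i.e.
 * TARGET_PAGE_BITS == 12, the page-number space is 64 - 12 = 52 bits wide and
 * each level of the radix tree consumes P_L2_BITS == 9 of them, so
 *
 *     P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = 6
 *
 * six levels of 512-entry nodes, covering 6 * 9 = 54 >= 52 bits.  A lookup
 * peels 9 bits at a time off the top of the page index until it reaches a
 * leaf entry (skip == 0), whose ptr field indexes the sections table.
 */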

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
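
/* Illustrative sketch (not part of the build): registering a mapping in the
 * dispatch radix tree.  The numbers are hypothetical.  To make 16 pages that
 * start at guest-physical address 0x100000 resolve to sections[5]:
 *
 *     phys_page_set(d, 0x100000 >> TARGET_PAGE_BITS, 16, 5);
 *
 * phys_page_set_level() then walks down from the root, allocating
 * intermediate nodes on demand and collapsing any fully covered, naturally
 * aligned sub-range into a single leaf at the higher level (the
 * "*nb >= step" branch above).
 */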

/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}
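
/* Illustrative example (not part of the build): if an inner node has exactly
 * one valid child and that child itself skips 2 levels, the parent entry
 * absorbs it: lp->skip becomes 1 + 2 = 3 and lp->ptr points straight at the
 * grandchild's target, so a lookup that previously walked three nodes now
 * walks one.  The merge is refused once the combined skip would reach
 * (1 << 3), the limit checked in phys_page_compact() above.
 */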

static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return section->size.hi ||
           range_covers_byte(section->offset_within_address_space,
                             section->size.lo, addr);
}

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = atomic_read(&d->mru_section);
    subpage_t *subpage;
    bool update;

    if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
        section_covers_addr(section, addr)) {
        update = false;
    } else {
        section = phys_page_find(d->phys_map, addr, d->map.nodes,
                                 d->map.sections);
        update = true;
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    if (update) {
        atomic_set(&d->mru_section, section);
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}
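
/* Illustrative sketch (not part of the build): a caller inside an RCU
 * critical section translating a guest-physical address before touching the
 * target region.  The address and length are hypothetical.
 *
 *     hwaddr xlat, len = 4;
 *     MemoryRegion *mr;
 *
 *     rcu_read_lock();
 *     mr = address_space_translate(&address_space_memory, 0xfee00000,
 *                                  &xlat, &len, false);
 *     if (memory_region_is_ram(mr)) {
 *         ... read at most len bytes from the block at offset xlat ...
 *     }
 *     rcu_read_unlock();
 *
 * Note that len may come back smaller than requested, and that MMIO lengths
 * are deliberately not clamped here (see address_space_translate_internal()).
 */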

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
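
/* Illustrative sketch (not part of the build): how a target CPU with two
 * address spaces might wire them up at realize time.  The secure_as pointer
 * and the index assignment are hypothetical.
 *
 *     cpu->num_ases = 2;
 *     cpu_address_space_init(cpu, &address_space_memory, 0);
 *     cpu_address_space_init(cpu, secure_as, 1);
 *
 * Index 0 doubles as the legacy cpu->as alias, and the assertion above
 * restricts KVM guests to a single address space.
 */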
#endif

#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

static int cpu_get_free_index(Error **errp)
{
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
                   MAX_CPUMASK_BITS);
        return -1;
    }

    bitmap_set(cpu_index_map, cpu, 1);
    return cpu;
}

void cpu_exec_exit(CPUState *cpu)
{
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
        return;
    }

    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
    cpu->cpu_index = -1;
}
#else

static int cpu_get_free_index(Error **errp)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

void cpu_exec_exit(CPUState *cpu)
{
}
#endif

void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    Error *local_err = NULL;

    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();

    /* This is a softmmu CPU object, so create a property for it
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
                             (Object **)&cpu->memory,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));
#endif

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    (void) cc;
    cpu_list_unlock();
#else
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
    }
#endif
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    MemTxAttrs attrs;
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
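
/* Illustrative sketch (not part of the build): how gdbstub-style debug code
 * typically installs and later removes a 4-byte write watchpoint.  The
 * address is hypothetical.
 *
 *     CPUWatchpoint *wp;
 *
 *     if (cpu_watchpoint_insert(cpu, 0x1000, 4,
 *                               BP_MEM_WRITE | BP_GDB, &wp) < 0) {
 *         ... reject the request ...
 *     }
 *     ...
 *     cpu_watchpoint_remove_by_ref(cpu, wp);
 *
 * Passing NULL as the final argument is also fine when the caller does not
 * need the CPUWatchpoint pointer back.
 */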

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
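
/* Worked example (illustrative): a watchpoint covering the last 4 bytes of
 * the address space has wp->vaddr + wp->len == 0 after wrapping, so a naive
 * "addr < wp->vaddr + wp->len" test would never match it.  Using the
 * inclusive end points wpend and addrend instead, an access to any of those
 * final bytes satisfies !(addr > wpend || wp->vaddr > addrend) and the
 * overlap is reported correctly.
 */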

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *     xxx removed from list
     *     rcu_read_lock()
     *     read mru_block
     *     mru_block = NULL;
     *     call_rcu(reclaim_ramblock, xxx);
     *     rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    bool dirty = false;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
                                              offset, num);
        page += num;
    }

    rcu_read_unlock();

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}
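
/* Worked example (illustrative): the dirty bitmap is split into chunks of
 * DIRTY_MEMORY_BLOCK_SIZE pages each.  Suppose, purely for the sake of the
 * arithmetic, that a chunk held 262144 pages and the range covered pages
 * 262000..262300.  The first loop iteration would clear pages 262000..262143
 * (idx = 0, offset = 262000, num = 144) and the second pages 262144..262300
 * (idx = 1, offset = 0, num = 157), ORing the two "was dirty" results into
 * the return value.
 */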

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
    qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}
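
/* Illustrative sketch (not part of the build): an accelerator that must place
 * guest RAM in special memory can override the allocator early in its init
 * path.  The special_ram_alloc() helper here is hypothetical.
 *
 *     static void *special_ram_alloc(size_t size, uint64_t *align)
 *     {
 *         ... return suitably aligned memory, or NULL on failure ...
 *     }
 *
 *     phys_mem_set_alloc(special_ram_alloc);
 *
 * Every later RAM block allocation then goes through special_ram_alloc()
 * instead of qemu_anon_ram_alloc().
 */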

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}


static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001197 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001198 now.size = int128_zero();
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001199 }
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001200 while (int128_ne(remain.size, now.size)) {
1201 remain.size = int128_sub(remain.size, now.size);
1202 remain.offset_within_address_space += int128_get64(now.size);
1203 remain.offset_within_region += int128_get64(now.size);
Tyler Hall69b67642012-07-25 18:45:04 -04001204 now = remain;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001205 if (int128_lt(remain.size, page_size)) {
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001206 register_subpage(d, &now);
Hu Tao88266242013-08-29 18:21:16 +08001207 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001208 now.size = page_size;
Avi Kivityac1970f2012-10-03 16:22:53 +02001209 register_subpage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001210 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001211 now.size = int128_and(now.size, int128_neg(page_size));
Avi Kivityac1970f2012-10-03 16:22:53 +02001212 register_multipage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001213 }
Avi Kivity0f0cb162012-02-13 17:14:32 +02001214 }
1215}
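/*
 * Worked example (illustrative, assuming 4 KiB target pages): a section
 * covering guest-physical [0x1800, 0x3b00) is registered in three steps:
 * a head subpage for [0x1800, 0x2000), one full page [0x2000, 0x3000)
 * through register_multipage(), and a tail subpage for [0x3000, 0x3b00).
 * A section that starts and ends on page boundaries takes the
 * register_multipage() path only.
 */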
1216
Sheng Yang62a27442010-01-26 19:21:16 +08001217void qemu_flush_coalesced_mmio_buffer(void)
1218{
1219 if (kvm_enabled())
1220 kvm_flush_coalesced_mmio_buffer();
1221}
1222
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001223void qemu_mutex_lock_ramlist(void)
1224{
1225 qemu_mutex_lock(&ram_list.mutex);
1226}
1227
1228void qemu_mutex_unlock_ramlist(void)
1229{
1230 qemu_mutex_unlock(&ram_list.mutex);
1231}
1232
Markus Armbrustere1e84ba2013-07-31 15:11:10 +02001233#ifdef __linux__
Alex Williamson04b16652010-07-02 11:13:17 -06001234static void *file_ram_alloc(RAMBlock *block,
1235 ram_addr_t memory,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001236 const char *path,
1237 Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001238{
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001239 bool unlink_on_error = false;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001240 char *filename;
Peter Feiner8ca761f2013-03-04 13:54:25 -05001241 char *sanitized_name;
1242 char *c;
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +03001243 void *area;
Paolo Bonzini5c3ece72016-03-17 15:53:13 +01001244 int fd = -1;
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001245 int64_t page_size;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001246
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001247 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1248 error_setg(errp,
1249 "host lacks kvm mmu notifiers, -mem-path unsupported");
1250 return NULL;
1251 }
1252
1253 for (;;) {
1254 fd = open(path, O_RDWR);
1255 if (fd >= 0) {
1256 /* @path names an existing file, use it */
1257 break;
1258 }
1259 if (errno == ENOENT) {
1260 /* @path names a file that doesn't exist, create it */
1261 fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
1262 if (fd >= 0) {
1263 unlink_on_error = true;
1264 break;
1265 }
1266 } else if (errno == EISDIR) {
1267 /* @path names a directory, create a file there */
1268 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1269 sanitized_name = g_strdup(memory_region_name(block->mr));
1270 for (c = sanitized_name; *c != '\0'; c++) {
1271 if (*c == '/') {
1272 *c = '_';
1273 }
1274 }
1275
1276 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1277 sanitized_name);
1278 g_free(sanitized_name);
1279
1280 fd = mkstemp(filename);
1281 if (fd >= 0) {
1282 unlink(filename);
1283 g_free(filename);
1284 break;
1285 }
1286 g_free(filename);
1287 }
1288 if (errno != EEXIST && errno != EINTR) {
1289 error_setg_errno(errp, errno,
1290 "can't open backing store %s for guest RAM",
1291 path);
1292 goto error;
1293 }
1294 /*
1295 * Try again on EINTR and EEXIST. The latter happens when
1296 * something else creates the file between our two open().
1297 */
1298 }
1299
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001300 page_size = qemu_fd_getpagesize(fd);
1301 block->mr->align = page_size;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001302
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001303 if (memory < page_size) {
Hu Tao557529d2014-09-09 13:28:00 +08001304 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001305 "or larger than page size 0x%" PRIx64,
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001306 memory, page_size);
Hu Tao557529d2014-09-09 13:28:00 +08001307 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001308 }
1309
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001310 memory = ROUND_UP(memory, page_size);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001311
1312 /*
1313 * ftruncate is not supported by hugetlbfs in older
1314 * hosts, so don't bother bailing out on errors.
1315 * If anything goes wrong with it under other filesystems,
1316 * mmap will fail.
1317 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001318 if (ftruncate(fd, memory)) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001319 perror("ftruncate");
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001320 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001321
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001322 area = qemu_ram_mmap(fd, memory, page_size, block->flags & RAM_SHARED);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001323 if (area == MAP_FAILED) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001324 error_setg_errno(errp, errno,
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001325 "unable to map backing store for guest RAM");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001326 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001327 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001328
1329 if (mem_prealloc) {
Paolo Bonzini38183312014-05-14 17:43:21 +08001330 os_mem_prealloc(fd, area, memory);
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001331 }
1332
Alex Williamson04b16652010-07-02 11:13:17 -06001333 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001334 return area;
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001335
1336error:
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001337 if (unlink_on_error) {
1338 unlink(path);
1339 }
Paolo Bonzini5c3ece72016-03-17 15:53:13 +01001340 if (fd != -1) {
1341 close(fd);
1342 }
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001343 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001344}
1345#endif
1346
Mike Day0dc3f442013-09-05 14:41:35 -04001347/* Called with the ramlist lock held. */
Alex Williamsond17b5282010-06-25 11:08:38 -06001348static ram_addr_t find_ram_offset(ram_addr_t size)
1349{
Alex Williamson04b16652010-07-02 11:13:17 -06001350 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001351 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001352
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001353 assert(size != 0); /* it would hand out same offset multiple times */
1354
Mike Day0dc3f442013-09-05 14:41:35 -04001355 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
Alex Williamson04b16652010-07-02 11:13:17 -06001356 return 0;
Mike Day0d53d9f2015-01-21 13:45:24 +01001357 }
Alex Williamson04b16652010-07-02 11:13:17 -06001358
Mike Day0dc3f442013-09-05 14:41:35 -04001359 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001360 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001361
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001362 end = block->offset + block->max_length;
Alex Williamson04b16652010-07-02 11:13:17 -06001363
Mike Day0dc3f442013-09-05 14:41:35 -04001364 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001365 if (next_block->offset >= end) {
1366 next = MIN(next, next_block->offset);
1367 }
1368 }
1369 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001370 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001371 mingap = next - end;
1372 }
1373 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001374
1375 if (offset == RAM_ADDR_MAX) {
1376 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1377 (uint64_t)size);
1378 abort();
1379 }
1380
Alex Williamson04b16652010-07-02 11:13:17 -06001381 return offset;
1382}
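/*
 * Worked example (illustrative): with blocks already placed at
 * [0x0, 0x40000000) and [0x80000000, 0xc0000000), a request for
 * 0x20000000 bytes finds two candidate gaps, keeps the smallest one that
 * still fits, and returns 0x40000000.  If no gap is large enough the
 * function aborts.
 */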
1383
Juan Quintela652d7ec2012-07-20 10:37:54 +02001384ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001385{
Alex Williamsond17b5282010-06-25 11:08:38 -06001386 RAMBlock *block;
1387 ram_addr_t last = 0;
1388
Mike Day0dc3f442013-09-05 14:41:35 -04001389 rcu_read_lock();
1390 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001391 last = MAX(last, block->offset + block->max_length);
Mike Day0d53d9f2015-01-21 13:45:24 +01001392 }
Mike Day0dc3f442013-09-05 14:41:35 -04001393 rcu_read_unlock();
Alex Williamsond17b5282010-06-25 11:08:38 -06001394 return last;
1395}
1396
Jason Baronddb97f12012-08-02 15:44:16 -04001397static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1398{
1399 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001400
1401 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
Marcel Apfelbaum47c8ca52015-02-04 17:43:54 +02001402 if (!machine_dump_guest_core(current_machine)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001403 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1404 if (ret) {
1405 perror("qemu_madvise");
1406 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1407 "but dump_guest_core=off specified\n");
1408 }
1409 }
1410}
1411
Mike Day0dc3f442013-09-05 14:41:35 -04001412/* Called within an RCU critical section, or while the ramlist lock
1413 * is held.
1414 */
Hu Tao20cfe882014-04-02 15:13:26 +08001415static RAMBlock *find_ram_block(ram_addr_t addr)
Cam Macdonell84b89d72010-07-26 18:10:57 -06001416{
Hu Tao20cfe882014-04-02 15:13:26 +08001417 RAMBlock *block;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001418
Mike Day0dc3f442013-09-05 14:41:35 -04001419 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001420 if (block->offset == addr) {
Hu Tao20cfe882014-04-02 15:13:26 +08001421 return block;
Avi Kivityc5705a72011-12-20 15:59:12 +02001422 }
1423 }
Hu Tao20cfe882014-04-02 15:13:26 +08001424
1425 return NULL;
1426}
1427
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001428const char *qemu_ram_get_idstr(RAMBlock *rb)
1429{
1430 return rb->idstr;
1431}
1432
Mike Dayae3a7042013-09-05 14:41:35 -04001433/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001434void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1435{
Mike Dayae3a7042013-09-05 14:41:35 -04001436 RAMBlock *new_block, *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001437
Mike Day0dc3f442013-09-05 14:41:35 -04001438 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001439 new_block = find_ram_block(addr);
Avi Kivityc5705a72011-12-20 15:59:12 +02001440 assert(new_block);
1441 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001442
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001443 if (dev) {
1444 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001445 if (id) {
1446 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001447 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001448 }
1449 }
1450 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1451
Mike Day0dc3f442013-09-05 14:41:35 -04001452 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001453 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001454 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1455 new_block->idstr);
1456 abort();
1457 }
1458 }
Mike Day0dc3f442013-09-05 14:41:35 -04001459 rcu_read_unlock();
Avi Kivityc5705a72011-12-20 15:59:12 +02001460}
1461
Mike Dayae3a7042013-09-05 14:41:35 -04001462/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001463void qemu_ram_unset_idstr(ram_addr_t addr)
1464{
Mike Dayae3a7042013-09-05 14:41:35 -04001465 RAMBlock *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001466
Mike Dayae3a7042013-09-05 14:41:35 -04001467 /* FIXME: arch_init.c assumes that this is not called throughout
1468 * migration. Ignore the problem since hot-unplug during migration
1469 * does not work anyway.
1470 */
1471
Mike Day0dc3f442013-09-05 14:41:35 -04001472 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001473 block = find_ram_block(addr);
Hu Tao20cfe882014-04-02 15:13:26 +08001474 if (block) {
1475 memset(block->idstr, 0, sizeof(block->idstr));
1476 }
Mike Day0dc3f442013-09-05 14:41:35 -04001477 rcu_read_unlock();
Hu Tao20cfe882014-04-02 15:13:26 +08001478}
1479
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001480static int memory_try_enable_merging(void *addr, size_t len)
1481{
Marcel Apfelbaum75cc7f02015-02-04 17:43:55 +02001482 if (!machine_mem_merge(current_machine)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001483 /* disabled by the user */
1484 return 0;
1485 }
1486
1487 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1488}
1489
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001490/* Only legal before guest might have detected the memory size: e.g. on
1491 * incoming migration, or right after reset.
1492 *
1493 * As the memory core doesn't know how memory is accessed, it is up to
1494 * the resize callback to update device state and/or add assertions to detect
1495 * misuse, if necessary.
1496 */
1497int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
1498{
1499 RAMBlock *block = find_ram_block(base);
1500
1501 assert(block);
1502
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001503 newsize = HOST_PAGE_ALIGN(newsize);
Michael S. Tsirkin129ddaf2015-02-17 10:15:30 +01001504
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001505 if (block->used_length == newsize) {
1506 return 0;
1507 }
1508
1509 if (!(block->flags & RAM_RESIZEABLE)) {
1510 error_setg_errno(errp, EINVAL,
1511 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1512 " in != 0x" RAM_ADDR_FMT, block->idstr,
1513 newsize, block->used_length);
1514 return -EINVAL;
1515 }
1516
1517 if (block->max_length < newsize) {
1518 error_setg_errno(errp, EINVAL,
1519 "Length too large: %s: 0x" RAM_ADDR_FMT
1520 " > 0x" RAM_ADDR_FMT, block->idstr,
1521 newsize, block->max_length);
1522 return -EINVAL;
1523 }
1524
1525 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1526 block->used_length = newsize;
Paolo Bonzini58d27072015-03-23 11:56:01 +01001527 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1528 DIRTY_CLIENTS_ALL);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001529 memory_region_set_size(block->mr, newsize);
1530 if (block->resized) {
1531 block->resized(block->idstr, newsize, block->host);
1532 }
1533 return 0;
1534}
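/*
 * Illustrative use (not from the original code): only blocks created with
 * RAM_RESIZEABLE, e.g. via qemu_ram_alloc_resizeable(), may change size,
 * and only up to max_length.  The callback name and the sizes below are
 * made up for the example.
 *
 *     static void my_rom_resized(const char *id, uint64_t new_len, void *host)
 *     {
 *         // device updates its own view of the block here
 *     }
 *
 *     Error *err = NULL;
 *     RAMBlock *rb = qemu_ram_alloc_resizeable(64 * 1024, 16 * 1024 * 1024,
 *                                              my_rom_resized, mr, &err);
 *     ...
 *     if (qemu_ram_resize(rb->offset, 2 * 1024 * 1024, &err) < 0) {
 *         error_report_err(err);
 *     }
 */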
1535
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001536/* Called with ram_list.mutex held */
1537static void dirty_memory_extend(ram_addr_t old_ram_size,
1538 ram_addr_t new_ram_size)
1539{
1540 ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
1541 DIRTY_MEMORY_BLOCK_SIZE);
1542 ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
1543 DIRTY_MEMORY_BLOCK_SIZE);
1544 int i;
1545
1546 /* Only need to extend if block count increased */
1547 if (new_num_blocks <= old_num_blocks) {
1548 return;
1549 }
1550
1551 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1552 DirtyMemoryBlocks *old_blocks;
1553 DirtyMemoryBlocks *new_blocks;
1554 int j;
1555
1556 old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
1557 new_blocks = g_malloc(sizeof(*new_blocks) +
1558 sizeof(new_blocks->blocks[0]) * new_num_blocks);
1559
1560 if (old_num_blocks) {
1561 memcpy(new_blocks->blocks, old_blocks->blocks,
1562 old_num_blocks * sizeof(old_blocks->blocks[0]));
1563 }
1564
1565 for (j = old_num_blocks; j < new_num_blocks; j++) {
1566 new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
1567 }
1568
1569 atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
1570
1571 if (old_blocks) {
1572 g_free_rcu(old_blocks, rcu);
1573 }
1574 }
1575}
1576
Fam Zheng528f46a2016-03-01 14:18:18 +08001577static void ram_block_add(RAMBlock *new_block, Error **errp)
Avi Kivityc5705a72011-12-20 15:59:12 +02001578{
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001579 RAMBlock *block;
Mike Day0d53d9f2015-01-21 13:45:24 +01001580 RAMBlock *last_block = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001581 ram_addr_t old_ram_size, new_ram_size;
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001582 Error *err = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001583
1584 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001585
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001586 qemu_mutex_lock_ramlist();
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001587 new_block->offset = find_ram_offset(new_block->max_length);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001588
1589 if (!new_block->host) {
1590 if (xen_enabled()) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001591 xen_ram_alloc(new_block->offset, new_block->max_length,
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001592 new_block->mr, &err);
1593 if (err) {
1594 error_propagate(errp, err);
1595 qemu_mutex_unlock_ramlist();
Paolo Bonzini39c350e2016-03-09 18:14:01 +01001596 return;
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001597 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001598 } else {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001599 new_block->host = phys_mem_alloc(new_block->max_length,
Igor Mammedova2b257d2014-10-31 16:38:37 +00001600 &new_block->mr->align);
Markus Armbruster39228252013-07-31 15:11:11 +02001601 if (!new_block->host) {
Hu Taoef701d72014-09-09 13:27:54 +08001602 error_setg_errno(errp, errno,
1603 "cannot set up guest memory '%s'",
1604 memory_region_name(new_block->mr));
1605 qemu_mutex_unlock_ramlist();
Paolo Bonzini39c350e2016-03-09 18:14:01 +01001606 return;
Markus Armbruster39228252013-07-31 15:11:11 +02001607 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001608 memory_try_enable_merging(new_block->host, new_block->max_length);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001609 }
1610 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001611
Li Zhijiandd631692015-07-02 20:18:06 +08001612 new_ram_size = MAX(old_ram_size,
1613 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1614 if (new_ram_size > old_ram_size) {
1615 migration_bitmap_extend(old_ram_size, new_ram_size);
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001616 dirty_memory_extend(old_ram_size, new_ram_size);
Li Zhijiandd631692015-07-02 20:18:06 +08001617 }
Mike Day0d53d9f2015-01-21 13:45:24 +01001618 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1619 * QLIST (which has an RCU-friendly variant) does not have insertion at
1620 * tail, so save the last element in last_block.
1621 */
Mike Day0dc3f442013-09-05 14:41:35 -04001622 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Mike Day0d53d9f2015-01-21 13:45:24 +01001623 last_block = block;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001624 if (block->max_length < new_block->max_length) {
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001625 break;
1626 }
1627 }
1628 if (block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001629 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001630 } else if (last_block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001631 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001632 } else { /* list is empty */
Mike Day0dc3f442013-09-05 14:41:35 -04001633 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001634 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001635 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001636
Mike Day0dc3f442013-09-05 14:41:35 -04001637 /* Write list before version */
1638 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001639 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001640 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001641
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001642 cpu_physical_memory_set_dirty_range(new_block->offset,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001643 new_block->used_length,
1644 DIRTY_CLIENTS_ALL);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001645
Paolo Bonzinia904c912015-01-21 16:18:35 +01001646 if (new_block->host) {
1647 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1648 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1649 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1650 if (kvm_enabled()) {
1651 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1652 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001653 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001654}
1655
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001656#ifdef __linux__
Fam Zheng528f46a2016-03-01 14:18:18 +08001657RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
1658 bool share, const char *mem_path,
1659 Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001660{
1661 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001662 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001663
1664 if (xen_enabled()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001665 error_setg(errp, "-mem-path not supported with Xen");
Fam Zheng528f46a2016-03-01 14:18:18 +08001666 return NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001667 }
1668
1669 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1670 /*
1671 * file_ram_alloc() needs to allocate just like
1672 * phys_mem_alloc, but we haven't bothered to provide
1673 * a hook there.
1674 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001675 error_setg(errp,
1676 "-mem-path not supported with this accelerator");
Fam Zheng528f46a2016-03-01 14:18:18 +08001677 return NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001678 }
1679
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001680 size = HOST_PAGE_ALIGN(size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001681 new_block = g_malloc0(sizeof(*new_block));
1682 new_block->mr = mr;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001683 new_block->used_length = size;
1684 new_block->max_length = size;
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001685 new_block->flags = share ? RAM_SHARED : 0;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001686 new_block->host = file_ram_alloc(new_block, size,
1687 mem_path, errp);
1688 if (!new_block->host) {
1689 g_free(new_block);
Fam Zheng528f46a2016-03-01 14:18:18 +08001690 return NULL;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001691 }
1692
Fam Zheng528f46a2016-03-01 14:18:18 +08001693 ram_block_add(new_block, &local_err);
Hu Taoef701d72014-09-09 13:27:54 +08001694 if (local_err) {
1695 g_free(new_block);
1696 error_propagate(errp, local_err);
Fam Zheng528f46a2016-03-01 14:18:18 +08001697 return NULL;
Hu Taoef701d72014-09-09 13:27:54 +08001698 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001699 return new_block;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001700}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001701#endif
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001702
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001703static
Fam Zheng528f46a2016-03-01 14:18:18 +08001704RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1705 void (*resized)(const char*,
1706 uint64_t length,
1707 void *host),
1708 void *host, bool resizeable,
1709 MemoryRegion *mr, Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001710{
1711 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001712 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001713
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001714 size = HOST_PAGE_ALIGN(size);
1715 max_size = HOST_PAGE_ALIGN(max_size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001716 new_block = g_malloc0(sizeof(*new_block));
1717 new_block->mr = mr;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001718 new_block->resized = resized;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001719 new_block->used_length = size;
1720 new_block->max_length = max_size;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001721 assert(max_size >= size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001722 new_block->fd = -1;
1723 new_block->host = host;
1724 if (host) {
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001725 new_block->flags |= RAM_PREALLOC;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001726 }
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001727 if (resizeable) {
1728 new_block->flags |= RAM_RESIZEABLE;
1729 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001730 ram_block_add(new_block, &local_err);
Hu Taoef701d72014-09-09 13:27:54 +08001731 if (local_err) {
1732 g_free(new_block);
1733 error_propagate(errp, local_err);
Fam Zheng528f46a2016-03-01 14:18:18 +08001734 return NULL;
Hu Taoef701d72014-09-09 13:27:54 +08001735 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001736 return new_block;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001737}
1738
Fam Zheng528f46a2016-03-01 14:18:18 +08001739RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001740 MemoryRegion *mr, Error **errp)
1741{
1742 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1743}
1744
Fam Zheng528f46a2016-03-01 14:18:18 +08001745RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
pbrook94a6b542009-04-11 17:15:54 +00001746{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001747 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1748}
1749
Fam Zheng528f46a2016-03-01 14:18:18 +08001750RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001751 void (*resized)(const char*,
1752 uint64_t length,
1753 void *host),
1754 MemoryRegion *mr, Error **errp)
1755{
1756 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
pbrook94a6b542009-04-11 17:15:54 +00001757}
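/*
 * Illustrative call path (simplified): board code normally reaches these
 * allocators through the memory API rather than calling them directly;
 * memory_region_init_ram() ends up in qemu_ram_alloc(), and
 * memory_region_init_ram_from_file() uses qemu_ram_alloc_from_file() on
 * Linux hosts.  The names "pc.ram" and system_memory below stand in for
 * whatever the caller uses.
 *
 *     memory_region_init_ram(mr, owner, "pc.ram", ram_size, &error_fatal);
 *     memory_region_add_subregion(system_memory, 0, mr);
 */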
bellarde9a1ab12007-02-08 23:08:38 +00001758
Paolo Bonzini43771532013-09-09 17:58:40 +02001759static void reclaim_ramblock(RAMBlock *block)
1760{
1761 if (block->flags & RAM_PREALLOC) {
1762 ;
1763 } else if (xen_enabled()) {
1764 xen_invalidate_map_cache_entry(block->host);
1765#ifndef _WIN32
1766 } else if (block->fd >= 0) {
Eduardo Habkost2f3a2bb2015-11-06 20:11:21 -02001767 qemu_ram_munmap(block->host, block->max_length);
Paolo Bonzini43771532013-09-09 17:58:40 +02001768 close(block->fd);
1769#endif
1770 } else {
1771 qemu_anon_ram_free(block->host, block->max_length);
1772 }
1773 g_free(block);
1774}
1775
Fam Zhengf1060c52016-03-01 14:18:22 +08001776void qemu_ram_free(RAMBlock *block)
bellarde9a1ab12007-02-08 23:08:38 +00001777{
Marc-André Lureau85bc2a12016-03-29 13:20:51 +02001778 if (!block) {
1779 return;
1780 }
1781
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001782 qemu_mutex_lock_ramlist();
Fam Zhengf1060c52016-03-01 14:18:22 +08001783 QLIST_REMOVE_RCU(block, next);
1784 ram_list.mru_block = NULL;
1785 /* Write list before version */
1786 smp_wmb();
1787 ram_list.version++;
1788 call_rcu(block, reclaim_ramblock, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001789 qemu_mutex_unlock_ramlist();
bellarde9a1ab12007-02-08 23:08:38 +00001790}
1791
Huang Yingcd19cfa2011-03-02 08:56:19 +01001792#ifndef _WIN32
1793void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1794{
1795 RAMBlock *block;
1796 ram_addr_t offset;
1797 int flags;
1798 void *area, *vaddr;
1799
Mike Day0dc3f442013-09-05 14:41:35 -04001800 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001801 offset = addr - block->offset;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001802 if (offset < block->max_length) {
Michael S. Tsirkin1240be22014-11-12 11:44:41 +02001803 vaddr = ramblock_ptr(block, offset);
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001804 if (block->flags & RAM_PREALLOC) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001805 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001806 } else if (xen_enabled()) {
1807 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001808 } else {
1809 flags = MAP_FIXED;
Markus Armbruster3435f392013-07-31 15:11:07 +02001810 if (block->fd >= 0) {
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001811 flags |= (block->flags & RAM_SHARED ?
1812 MAP_SHARED : MAP_PRIVATE);
Markus Armbruster3435f392013-07-31 15:11:07 +02001813 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1814 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001815 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001816 /*
1817 * Remap needs to match alloc. Accelerators that
1818 * set phys_mem_alloc never remap. If they did,
1819 * we'd need a remap hook here.
1820 */
1821 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1822
Huang Yingcd19cfa2011-03-02 08:56:19 +01001823 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1824 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1825 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001826 }
1827 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001828 fprintf(stderr, "Could not remap addr: "
1829 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001830 length, addr);
1831 exit(1);
1832 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001833 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001834 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001835 }
Huang Yingcd19cfa2011-03-02 08:56:19 +01001836 }
1837 }
1838}
1839#endif /* !_WIN32 */
1840
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001841int qemu_get_ram_fd(ram_addr_t addr)
1842{
Mike Dayae3a7042013-09-05 14:41:35 -04001843 RAMBlock *block;
1844 int fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001845
Mike Day0dc3f442013-09-05 14:41:35 -04001846 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001847 block = qemu_get_ram_block(addr);
1848 fd = block->fd;
Mike Day0dc3f442013-09-05 14:41:35 -04001849 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001850 return fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001851}
1852
Tetsuya Mukawa56a571d2015-12-21 12:47:34 +09001853void qemu_set_ram_fd(ram_addr_t addr, int fd)
1854{
1855 RAMBlock *block;
1856
1857 rcu_read_lock();
1858 block = qemu_get_ram_block(addr);
1859 block->fd = fd;
1860 rcu_read_unlock();
1861}
1862
Damjan Marion3fd74b82014-06-26 23:01:32 +02001863void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1864{
Mike Dayae3a7042013-09-05 14:41:35 -04001865 RAMBlock *block;
1866 void *ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001867
Mike Day0dc3f442013-09-05 14:41:35 -04001868 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001869 block = qemu_get_ram_block(addr);
1870 ptr = ramblock_ptr(block, 0);
Mike Day0dc3f442013-09-05 14:41:35 -04001871 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001872 return ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001873}
1874
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001875/* Return a host pointer to ram allocated with qemu_ram_alloc.
Mike Dayae3a7042013-09-05 14:41:35 -04001876 * This should not be used for general purpose DMA. Use address_space_map
1877 * or address_space_rw instead. For local memory (e.g. video ram) that the
1878 * device owns, use memory_region_get_ram_ptr.
Mike Day0dc3f442013-09-05 14:41:35 -04001879 *
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001880 * Called within RCU critical section.
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001881 */
Gonglei3655cb92016-02-20 10:35:20 +08001882void *qemu_get_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001883{
Gonglei3655cb92016-02-20 10:35:20 +08001884 RAMBlock *block = ram_block;
1885
1886 if (block == NULL) {
1887 block = qemu_get_ram_block(addr);
1888 }
Mike Dayae3a7042013-09-05 14:41:35 -04001889
1890 if (xen_enabled() && block->host == NULL) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001891 /* We need to check if the requested address is in the RAM
1892 * because we don't want to map the entire memory in QEMU.
1893 * In that case just map until the end of the page.
1894 */
1895 if (block->offset == 0) {
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001896 return xen_map_cache(addr, 0, 0);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001897 }
Mike Dayae3a7042013-09-05 14:41:35 -04001898
1899 block->host = xen_map_cache(block->offset, block->max_length, 1);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001900 }
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001901 return ramblock_ptr(block, addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00001902}
1903
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001904/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
Mike Dayae3a7042013-09-05 14:41:35 -04001905 * but takes a size argument.
Mike Day0dc3f442013-09-05 14:41:35 -04001906 *
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001907 * Called within RCU critical section.
Mike Dayae3a7042013-09-05 14:41:35 -04001908 */
Gonglei3655cb92016-02-20 10:35:20 +08001909static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
1910 hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001911{
Gonglei3655cb92016-02-20 10:35:20 +08001912 RAMBlock *block = ram_block;
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001913 ram_addr_t offset_inside_block;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001914 if (*size == 0) {
1915 return NULL;
1916 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001917
Gonglei3655cb92016-02-20 10:35:20 +08001918 if (block == NULL) {
1919 block = qemu_get_ram_block(addr);
1920 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001921 offset_inside_block = addr - block->offset;
1922 *size = MIN(*size, block->max_length - offset_inside_block);
1923
1924 if (xen_enabled() && block->host == NULL) {
1925 /* We need to check if the requested address is in the RAM
1926 * because we don't want to map the entire memory in QEMU.
1927 * In that case just map the requested area.
1928 */
1929 if (block->offset == 0) {
1930 return xen_map_cache(addr, *size, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001931 }
1932
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001933 block->host = xen_map_cache(block->offset, block->max_length, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001934 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001935
1936 return ramblock_ptr(block, offset_inside_block);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001937}
1938
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001939/*
1940 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1941 * in that RAMBlock.
1942 *
1943 * ptr: Host pointer to look up
1944 * round_offset: If true round the result offset down to a page boundary
1945 * *ram_addr: set to result ram_addr
1946 * *offset: set to result offset within the RAMBlock
1947 *
1948 * Returns: RAMBlock (or NULL if not found)
Mike Dayae3a7042013-09-05 14:41:35 -04001949 *
1950 * By the time this function returns, the returned pointer is not protected
1951 * by RCU anymore. If the caller is not within an RCU critical section and
1952 * does not hold the iothread lock, it must have other means of protecting the
1953 * pointer, such as a reference to the region that includes the incoming
1954 * ram_addr_t.
1955 */
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001956RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
1957 ram_addr_t *ram_addr,
1958 ram_addr_t *offset)
pbrook5579c7f2009-04-11 14:47:08 +00001959{
pbrook94a6b542009-04-11 17:15:54 +00001960 RAMBlock *block;
1961 uint8_t *host = ptr;
1962
Jan Kiszka868bb332011-06-21 22:59:09 +02001963 if (xen_enabled()) {
Mike Day0dc3f442013-09-05 14:41:35 -04001964 rcu_read_lock();
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001965 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001966 block = qemu_get_ram_block(*ram_addr);
1967 if (block) {
1968 *offset = (host - block->host);
1969 }
Mike Day0dc3f442013-09-05 14:41:35 -04001970 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001971 return block;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001972 }
1973
Mike Day0dc3f442013-09-05 14:41:35 -04001974 rcu_read_lock();
1975 block = atomic_rcu_read(&ram_list.mru_block);
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001976 if (block && block->host && host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001977 goto found;
1978 }
1979
Mike Day0dc3f442013-09-05 14:41:35 -04001980 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001981 /* This case appears when the block is not mapped. */
1982 if (block->host == NULL) {
1983 continue;
1984 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001985 if (host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001986 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001987 }
pbrook94a6b542009-04-11 17:15:54 +00001988 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001989
Mike Day0dc3f442013-09-05 14:41:35 -04001990 rcu_read_unlock();
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001991 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001992
1993found:
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001994 *offset = (host - block->host);
1995 if (round_offset) {
1996 *offset &= TARGET_PAGE_MASK;
1997 }
1998 *ram_addr = block->offset + *offset;
Mike Day0dc3f442013-09-05 14:41:35 -04001999 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00002000 return block;
2001}
2002
Dr. David Alan Gilberte3dd7492015-11-05 18:10:33 +00002003/*
2004 * Finds the named RAMBlock
2005 *
2006 * name: The name of RAMBlock to find
2007 *
2008 * Returns: RAMBlock (or NULL if not found)
2009 */
2010RAMBlock *qemu_ram_block_by_name(const char *name)
2011{
2012 RAMBlock *block;
2013
2014 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
2015 if (!strcmp(name, block->idstr)) {
2016 return block;
2017 }
2018 }
2019
2020 return NULL;
2021}
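/*
 * Illustrative use (not from the original code): incoming migration looks
 * up the destination block by the idstr carried in the stream.  Callers
 * are expected to hold the RCU read lock or the ramlist mutex while the
 * result is in use.
 *
 *     RAMBlock *rb = qemu_ram_block_by_name(id);
 *     if (!rb) {
 *         error_report("Can't find RAM block %s", id);
 *         return -EINVAL;
 *     }
 */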
2022
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00002023/* Some of the softmmu routines need to translate from a host pointer
2024 (typically a TLB entry) back to a ram offset. */
2025MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
2026{
2027 RAMBlock *block;
2028 ram_addr_t offset; /* Not used */
2029
2030 block = qemu_ram_block_from_host(ptr, false, ram_addr, &offset);
2031
2032 if (!block) {
2033 return NULL;
2034 }
2035
2036 return block->mr;
Marcelo Tosattie8902612010-10-11 15:31:19 -03002037}
Alex Williamsonf471a172010-06-11 11:11:42 -06002038
Paolo Bonzini49b24af2015-12-16 10:30:47 +01002039/* Called within RCU critical section. */
Avi Kivitya8170e52012-10-23 12:30:10 +02002040static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002041 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00002042{
Juan Quintela52159192013-10-08 12:44:04 +02002043 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002044 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00002045 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002046 switch (size) {
2047 case 1:
Gonglei3655cb92016-02-20 10:35:20 +08002048 stb_p(qemu_get_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002049 break;
2050 case 2:
Gonglei3655cb92016-02-20 10:35:20 +08002051 stw_p(qemu_get_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002052 break;
2053 case 4:
Gonglei3655cb92016-02-20 10:35:20 +08002054 stl_p(qemu_get_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002055 break;
2056 default:
2057 abort();
2058 }
Paolo Bonzini58d27072015-03-23 11:56:01 +01002059 /* Set both VGA and migration bits for simplicity and to remove
2060 * the notdirty callback faster.
2061 */
2062 cpu_physical_memory_set_dirty_range(ram_addr, size,
2063 DIRTY_CLIENTS_NOCODE);
bellardf23db162005-08-21 19:12:28 +00002064 /* we remove the notdirty callback only if the code has been
2065 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02002066 if (!cpu_physical_memory_is_clean(ram_addr)) {
Peter Crosthwaitebcae01e2015-09-10 22:39:42 -07002067 tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
Andreas Färber4917cf42013-05-27 05:17:50 +02002068 }
bellard1ccde1c2004-02-06 19:46:14 +00002069}
2070
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002071static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
2072 unsigned size, bool is_write)
2073{
2074 return is_write;
2075}
2076
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002077static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002078 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002079 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002080 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00002081};
2082
pbrook0f459d12008-06-09 00:20:13 +00002083/* Generate a debug exception if a watchpoint has been hit. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002084static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
pbrook0f459d12008-06-09 00:20:13 +00002085{
Andreas Färber93afead2013-08-26 03:41:01 +02002086 CPUState *cpu = current_cpu;
Sergey Fedorov568496c2016-02-11 11:17:32 +00002087 CPUClass *cc = CPU_GET_CLASS(cpu);
Andreas Färber93afead2013-08-26 03:41:01 +02002088 CPUArchState *env = cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00002089 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00002090 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00002091 CPUWatchpoint *wp;
Emilio G. Cota89fee742016-04-07 13:19:22 -04002092 uint32_t cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00002093
Andreas Färberff4700b2013-08-26 18:23:18 +02002094 if (cpu->watchpoint_hit) {
aliguori06d55cc2008-11-18 20:24:06 +00002095 /* We re-entered the check after replacing the TB. Now raise
2096 * the debug interrupt so that it will trigger after the
2097 * current instruction. */
Andreas Färber93afead2013-08-26 03:41:01 +02002098 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00002099 return;
2100 }
Andreas Färber93afead2013-08-26 03:41:01 +02002101 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Andreas Färberff4700b2013-08-26 18:23:18 +02002102 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Peter Maydell05068c02014-09-12 14:06:48 +01002103 if (cpu_watchpoint_address_matches(wp, vaddr, len)
2104 && (wp->flags & flags)) {
Peter Maydell08225672014-09-12 14:06:48 +01002105 if (flags == BP_MEM_READ) {
2106 wp->flags |= BP_WATCHPOINT_HIT_READ;
2107 } else {
2108 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
2109 }
2110 wp->hitaddr = vaddr;
Peter Maydell66b9b432015-04-26 16:49:24 +01002111 wp->hitattrs = attrs;
Andreas Färberff4700b2013-08-26 18:23:18 +02002112 if (!cpu->watchpoint_hit) {
Sergey Fedorov568496c2016-02-11 11:17:32 +00002113 if (wp->flags & BP_CPU &&
2114 !cc->debug_check_watchpoint(cpu, wp)) {
2115 wp->flags &= ~BP_WATCHPOINT_HIT;
2116 continue;
2117 }
Andreas Färberff4700b2013-08-26 18:23:18 +02002118 cpu->watchpoint_hit = wp;
Andreas Färber239c51a2013-09-01 17:12:23 +02002119 tb_check_watchpoint(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002120 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
Andreas Färber27103422013-08-26 08:31:06 +02002121 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +02002122 cpu_loop_exit(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002123 } else {
2124 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
Andreas Färber648f0342013-09-01 17:43:17 +02002125 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
Andreas Färber0ea8cb82013-09-03 02:12:23 +02002126 cpu_resume_from_signal(cpu, NULL);
aliguori6e140f22008-11-18 20:37:55 +00002127 }
aliguori06d55cc2008-11-18 20:24:06 +00002128 }
aliguori6e140f22008-11-18 20:37:55 +00002129 } else {
2130 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00002131 }
2132 }
2133}
2134
pbrook6658ffb2007-03-16 23:58:11 +00002135/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2136 so these check for a hit then pass through to the normal out-of-line
2137 phys routines. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002138static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
2139 unsigned size, MemTxAttrs attrs)
pbrook6658ffb2007-03-16 23:58:11 +00002140{
Peter Maydell66b9b432015-04-26 16:49:24 +01002141 MemTxResult res;
2142 uint64_t data;
Peter Maydell79ed0412016-01-21 14:15:06 +00002143 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2144 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
pbrook6658ffb2007-03-16 23:58:11 +00002145
Peter Maydell66b9b432015-04-26 16:49:24 +01002146 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
Avi Kivity1ec9b902012-01-02 12:47:48 +02002147 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04002148 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002149 data = address_space_ldub(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002150 break;
2151 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002152 data = address_space_lduw(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002153 break;
2154 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002155 data = address_space_ldl(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002156 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02002157 default: abort();
2158 }
Peter Maydell66b9b432015-04-26 16:49:24 +01002159 *pdata = data;
2160 return res;
2161}
2162
2163static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
2164 uint64_t val, unsigned size,
2165 MemTxAttrs attrs)
2166{
2167 MemTxResult res;
Peter Maydell79ed0412016-01-21 14:15:06 +00002168 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2169 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
Peter Maydell66b9b432015-04-26 16:49:24 +01002170
2171 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
2172 switch (size) {
2173 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002174 address_space_stb(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002175 break;
2176 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002177 address_space_stw(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002178 break;
2179 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002180 address_space_stl(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002181 break;
2182 default: abort();
2183 }
2184 return res;
pbrook6658ffb2007-03-16 23:58:11 +00002185}
2186
Avi Kivity1ec9b902012-01-02 12:47:48 +02002187static const MemoryRegionOps watch_mem_ops = {
Peter Maydell66b9b432015-04-26 16:49:24 +01002188 .read_with_attrs = watch_mem_read,
2189 .write_with_attrs = watch_mem_write,
Avi Kivity1ec9b902012-01-02 12:47:48 +02002190 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00002191};
pbrook6658ffb2007-03-16 23:58:11 +00002192
Peter Maydellf25a49e2015-04-26 16:49:24 +01002193static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2194 unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002195{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002196 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002197 uint8_t buf[8];
Peter Maydell5c9eb022015-04-26 16:49:24 +01002198 MemTxResult res;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002199
blueswir1db7b5422007-05-26 17:36:03 +00002200#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002201 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002202 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00002203#endif
Peter Maydell5c9eb022015-04-26 16:49:24 +01002204 res = address_space_read(subpage->as, addr + subpage->base,
2205 attrs, buf, len);
2206 if (res) {
2207 return res;
Peter Maydellf25a49e2015-04-26 16:49:24 +01002208 }
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002209 switch (len) {
2210 case 1:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002211 *data = ldub_p(buf);
2212 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002213 case 2:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002214 *data = lduw_p(buf);
2215 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002216 case 4:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002217 *data = ldl_p(buf);
2218 return MEMTX_OK;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002219 case 8:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002220 *data = ldq_p(buf);
2221 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002222 default:
2223 abort();
2224 }
blueswir1db7b5422007-05-26 17:36:03 +00002225}
2226
Peter Maydellf25a49e2015-04-26 16:49:24 +01002227static MemTxResult subpage_write(void *opaque, hwaddr addr,
2228 uint64_t value, unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002229{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002230 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002231 uint8_t buf[8];
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002232
blueswir1db7b5422007-05-26 17:36:03 +00002233#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002234 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002235 " value %"PRIx64"\n",
2236 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00002237#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002238 switch (len) {
2239 case 1:
2240 stb_p(buf, value);
2241 break;
2242 case 2:
2243 stw_p(buf, value);
2244 break;
2245 case 4:
2246 stl_p(buf, value);
2247 break;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002248 case 8:
2249 stq_p(buf, value);
2250 break;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002251 default:
2252 abort();
2253 }
Peter Maydell5c9eb022015-04-26 16:49:24 +01002254 return address_space_write(subpage->as, addr + subpage->base,
2255 attrs, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00002256}
2257
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002258static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08002259 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002260{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002261 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002262#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002263 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002264 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002265#endif
2266
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002267 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08002268 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002269}
2270
Avi Kivity70c68e42012-01-02 12:32:48 +02002271static const MemoryRegionOps subpage_ops = {
Peter Maydellf25a49e2015-04-26 16:49:24 +01002272 .read_with_attrs = subpage_read,
2273 .write_with_attrs = subpage_write,
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002274 .impl.min_access_size = 1,
2275 .impl.max_access_size = 8,
2276 .valid.min_access_size = 1,
2277 .valid.max_access_size = 8,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002278 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02002279 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00002280};
2281
Anthony Liguoric227f092009-10-01 16:12:16 -05002282static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002283 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00002284{
2285 int idx, eidx;
2286
2287 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2288 return -1;
2289 idx = SUBPAGE_IDX(start);
2290 eidx = SUBPAGE_IDX(end);
2291#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002292 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2293 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00002294#endif
blueswir1db7b5422007-05-26 17:36:03 +00002295 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02002296 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00002297 }
2298
2299 return 0;
2300}
2301
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002302static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00002303{
Anthony Liguoric227f092009-10-01 16:12:16 -05002304 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00002305
Anthony Liguori7267c092011-08-20 22:09:37 -05002306 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00002307
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002308 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00002309 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002310 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002311 NULL, TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02002312 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00002313#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002314 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2315 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00002316#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02002317 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00002318
2319 return mmio;
2320}
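/*
 * Usage sketch for the subpage machinery above (illustrative; sec_a, sec_b
 * and page_base are placeholder names): when two MemoryRegionSections share
 * a single target page, the dispatch builder creates a subpage for that page
 * and registers each section's byte range inside it, e.g. for a 4 KiB page
 * split in half:
 *
 *     subpage_t *sub = subpage_init(as, page_base);
 *     subpage_register(sub, 0x000, 0x7ff, sec_a);
 *     subpage_register(sub, 0x800, TARGET_PAGE_SIZE - 1, sec_b);
 *
 * Accesses landing in that page then go through subpage_ops, which forwards
 * them to subpage->as at subpage->base plus the offset within the page.
 */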
2321
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002322static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2323 MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02002324{
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002325 assert(as);
Avi Kivity5312bd82012-02-12 18:32:55 +02002326 MemoryRegionSection section = {
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002327 .address_space = as,
Avi Kivity5312bd82012-02-12 18:32:55 +02002328 .mr = mr,
2329 .offset_within_address_space = 0,
2330 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002331 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02002332 };
2333
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002334 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02002335}
2336
Peter Maydella54c87b2016-01-21 14:15:05 +00002337MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
Avi Kivityaa102232012-03-08 17:06:55 +02002338{
Peter Maydella54c87b2016-01-21 14:15:05 +00002339 int asidx = cpu_asidx_from_attrs(cpu, attrs);
2340 CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
Peter Maydell32857f42015-10-01 15:29:50 +01002341 AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002342 MemoryRegionSection *sections = d->map.sections;
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002343
2344 return sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02002345}
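/*
 * Sketch of the intended caller (illustrative): the softmmu I/O slow path
 * stashes a section index in the iotlb when a TLB entry is filled and later
 * recovers the target MemoryRegion from it; only the sub-page bits of the
 * index select the section:
 *
 *     MemoryRegion *mr = iotlb_to_region(cpu, iotlb_value, attrs);
 */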
2346
Avi Kivitye9179ce2009-06-14 11:38:52 +03002347static void io_mem_init(void)
2348{
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002349 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002350 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002351 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002352 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002353 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002354 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002355 NULL, UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03002356}
2357
Avi Kivityac1970f2012-10-03 16:22:53 +02002358static void mem_begin(MemoryListener *listener)
2359{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002360 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002361 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2362 uint16_t n;
2363
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002364 n = dummy_section(&d->map, as, &io_mem_unassigned);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002365 assert(n == PHYS_SECTION_UNASSIGNED);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002366 n = dummy_section(&d->map, as, &io_mem_notdirty);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002367 assert(n == PHYS_SECTION_NOTDIRTY);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002368 n = dummy_section(&d->map, as, &io_mem_rom);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002369 assert(n == PHYS_SECTION_ROM);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002370 n = dummy_section(&d->map, as, &io_mem_watch);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002371 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02002372
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02002373 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02002374 d->as = as;
2375 as->next_dispatch = d;
2376}
2377
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002378static void address_space_dispatch_free(AddressSpaceDispatch *d)
2379{
2380 phys_sections_free(&d->map);
2381 g_free(d);
2382}
2383
Paolo Bonzini00752702013-05-29 12:13:54 +02002384static void mem_commit(MemoryListener *listener)
2385{
2386 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02002387 AddressSpaceDispatch *cur = as->dispatch;
2388 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02002389
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002390 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02002391
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002392 atomic_rcu_set(&as->dispatch, next);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002393 if (cur) {
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002394 call_rcu(cur, address_space_dispatch_free, rcu);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002395 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02002396}
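/*
 * mem_begin()/mem_commit() build a fresh AddressSpaceDispatch and publish it
 * with RCU, so readers never see a half-updated table. A reader looks the
 * table up roughly like this (sketch):
 *
 *     rcu_read_lock();
 *     AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
 *     ... walk d->phys_map / d->map.sections ...
 *     rcu_read_unlock();
 *
 * which is why the old table is only freed after a grace period, via
 * call_rcu() here and in address_space_destroy_dispatch().
 */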
2397
Avi Kivity1d711482012-10-02 18:54:45 +02002398static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02002399{
Peter Maydell32857f42015-10-01 15:29:50 +01002400 CPUAddressSpace *cpuas;
2401 AddressSpaceDispatch *d;
Avi Kivity117712c2012-02-12 21:23:17 +02002402
2403 /* since each CPU stores ram addresses in its TLB cache, we must
2404 reset the modified entries */
Peter Maydell32857f42015-10-01 15:29:50 +01002405 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
2406 cpu_reloading_memory_map();
2407 /* The CPU and TLB are protected by the iothread lock.
2408 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2409 * may have split the RCU critical section.
2410 */
2411 d = atomic_rcu_read(&cpuas->as->dispatch);
2412 cpuas->memory_dispatch = d;
2413 tlb_flush(cpuas->cpu, 1);
Avi Kivity50c1e142012-02-08 21:36:02 +02002414}
2415
Avi Kivityac1970f2012-10-03 16:22:53 +02002416void address_space_init_dispatch(AddressSpace *as)
2417{
Paolo Bonzini00752702013-05-29 12:13:54 +02002418 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002419 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002420 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02002421 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02002422 .region_add = mem_add,
2423 .region_nop = mem_add,
2424 .priority = 0,
2425 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002426 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02002427}
2428
Paolo Bonzini6e48e8f2015-02-10 10:25:44 -07002429void address_space_unregister(AddressSpace *as)
2430{
2431 memory_listener_unregister(&as->dispatch_listener);
2432}
2433
Avi Kivity83f3c252012-10-07 12:59:55 +02002434void address_space_destroy_dispatch(AddressSpace *as)
2435{
2436 AddressSpaceDispatch *d = as->dispatch;
2437
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002438 atomic_rcu_set(&as->dispatch, NULL);
2439 if (d) {
2440 call_rcu(d, address_space_dispatch_free, rcu);
2441 }
Avi Kivity83f3c252012-10-07 12:59:55 +02002442}
2443
Avi Kivity62152b82011-07-26 14:26:14 +03002444static void memory_map_init(void)
2445{
Anthony Liguori7267c092011-08-20 22:09:37 -05002446 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01002447
Paolo Bonzini57271d62013-11-07 17:14:37 +01002448 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002449 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03002450
Anthony Liguori7267c092011-08-20 22:09:37 -05002451 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02002452 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2453 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002454 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity62152b82011-07-26 14:26:14 +03002455}
2456
2457MemoryRegion *get_system_memory(void)
2458{
2459 return system_memory;
2460}
2461
Avi Kivity309cb472011-08-08 16:09:03 +03002462MemoryRegion *get_system_io(void)
2463{
2464 return system_io;
2465}
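/*
 * Typical board-level use of the system memory region (sketch; "board.ram"
 * and ram_size are placeholders):
 *
 *     MemoryRegion *ram = g_new(MemoryRegion, 1);
 *     memory_region_init_ram(ram, NULL, "board.ram", ram_size, &error_fatal);
 *     memory_region_add_subregion(get_system_memory(), 0, ram);
 */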
2466
pbrooke2eef172008-06-08 01:09:01 +00002467#endif /* !defined(CONFIG_USER_ONLY) */
2468
bellard13eb76e2004-01-24 15:23:36 +00002469/* physical memory access (slow version, mainly for debug) */
2470#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02002471int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00002472 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002473{
2474 int l, flags;
2475 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00002476 void * p;
bellard13eb76e2004-01-24 15:23:36 +00002477
2478 while (len > 0) {
2479 page = addr & TARGET_PAGE_MASK;
2480 l = (page + TARGET_PAGE_SIZE) - addr;
2481 if (l > len)
2482 l = len;
2483 flags = page_get_flags(page);
2484 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00002485 return -1;
bellard13eb76e2004-01-24 15:23:36 +00002486 if (is_write) {
2487 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00002488 return -1;
bellard579a97f2007-11-11 14:26:47 +00002489 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002490 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00002491 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002492 memcpy(p, buf, l);
2493 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00002494 } else {
2495 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00002496 return -1;
bellard579a97f2007-11-11 14:26:47 +00002497 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002498 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00002499 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002500 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00002501 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00002502 }
2503 len -= l;
2504 buf += l;
2505 addr += l;
2506 }
Paul Brooka68fe892010-03-01 00:08:59 +00002507 return 0;
bellard13eb76e2004-01-24 15:23:36 +00002508}
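/*
 * Example caller (sketch): a debugger stub reading guest memory through the
 * page_get_flags()-based path above, where a negative return means the
 * address is not mapped for the guest:
 *
 *     uint8_t insn[4];
 *     if (cpu_memory_rw_debug(cpu, pc, insn, sizeof(insn), 0) < 0) {
 *         report the address as inaccessible to the debugger;
 *     }
 */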
bellard8df1cd02005-01-28 22:37:22 +00002509
bellard13eb76e2004-01-24 15:23:36 +00002510#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002511
Paolo Bonzini845b6212015-03-23 11:45:53 +01002512static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002513 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002514{
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002515 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2516 /* No early return if dirty_log_mask is or becomes 0, because
2517 * cpu_physical_memory_set_dirty_range will still call
2518 * xen_modified_memory.
2519 */
2520 if (dirty_log_mask) {
2521 dirty_log_mask =
2522 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002523 }
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002524 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2525 tb_invalidate_phys_range(addr, addr + length);
2526 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2527 }
2528 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002529}
2530
Richard Henderson23326162013-07-08 14:55:59 -07002531static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02002532{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02002533 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07002534
2535 /* Regions are assumed to support 1-4 byte accesses unless
2536 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07002537 if (access_size_max == 0) {
2538 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002539 }
Richard Henderson23326162013-07-08 14:55:59 -07002540
2541 /* Bound the maximum access by the alignment of the address. */
2542 if (!mr->ops->impl.unaligned) {
2543 unsigned align_size_max = addr & -addr;
2544 if (align_size_max != 0 && align_size_max < access_size_max) {
2545 access_size_max = align_size_max;
2546 }
2547 }
2548
2549 /* Don't attempt accesses larger than the maximum. */
2550 if (l > access_size_max) {
2551 l = access_size_max;
2552 }
Peter Maydell6554f5c2015-07-24 13:33:10 +01002553 l = pow2floor(l);
Richard Henderson23326162013-07-08 14:55:59 -07002554
2555 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002556}
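/*
 * Worked example for memory_access_size() (illustrative): for l = 8 at
 * addr = 0x1006 on a region whose ops declare valid.max_access_size = 4 and
 * no unaligned support, the device limit first clamps l to 4, the address
 * alignment (0x1006 & -0x1006 == 2) clamps it to 2, and pow2floor() keeps
 * it at 2 - so the caller's loop ends up issuing the transfer as several
 * smaller accesses.
 */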
2557
Jan Kiszka4840f102015-06-18 18:47:22 +02002558static bool prepare_mmio_access(MemoryRegion *mr)
Paolo Bonzini125b3802015-06-18 18:47:21 +02002559{
Jan Kiszka4840f102015-06-18 18:47:22 +02002560 bool unlocked = !qemu_mutex_iothread_locked();
2561 bool release_lock = false;
2562
2563 if (unlocked && mr->global_locking) {
2564 qemu_mutex_lock_iothread();
2565 unlocked = false;
2566 release_lock = true;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002567 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002568 if (mr->flush_coalesced_mmio) {
2569 if (unlocked) {
2570 qemu_mutex_lock_iothread();
2571 }
2572 qemu_flush_coalesced_mmio_buffer();
2573 if (unlocked) {
2574 qemu_mutex_unlock_iothread();
2575 }
2576 }
2577
2578 return release_lock;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002579}
2580
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002581/* Called within RCU critical section. */
2582static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
2583 MemTxAttrs attrs,
2584 const uint8_t *buf,
2585 int len, hwaddr addr1,
2586 hwaddr l, MemoryRegion *mr)
bellard13eb76e2004-01-24 15:23:36 +00002587{
bellard13eb76e2004-01-24 15:23:36 +00002588 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002589 uint64_t val;
Peter Maydell3b643492015-04-26 16:49:23 +01002590 MemTxResult result = MEMTX_OK;
Jan Kiszka4840f102015-06-18 18:47:22 +02002591 bool release_lock = false;
ths3b46e622007-09-17 08:09:54 +00002592
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002593 for (;;) {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002594 if (!memory_access_is_direct(mr, true)) {
2595 release_lock |= prepare_mmio_access(mr);
2596 l = memory_access_size(mr, l, addr1);
2597 /* XXX: could force current_cpu to NULL to avoid
2598 potential bugs */
2599 switch (l) {
2600 case 8:
2601 /* 64 bit write access */
2602 val = ldq_p(buf);
2603 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2604 attrs);
2605 break;
2606 case 4:
2607 /* 32 bit write access */
2608 val = ldl_p(buf);
2609 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2610 attrs);
2611 break;
2612 case 2:
2613 /* 16 bit write access */
2614 val = lduw_p(buf);
2615 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2616 attrs);
2617 break;
2618 case 1:
2619 /* 8 bit write access */
2620 val = ldub_p(buf);
2621 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2622 attrs);
2623 break;
2624 default:
2625 abort();
bellard13eb76e2004-01-24 15:23:36 +00002626 }
2627 } else {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002628 addr1 += memory_region_get_ram_addr(mr);
2629 /* RAM case */
Gonglei3655cb92016-02-20 10:35:20 +08002630 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002631 memcpy(ptr, buf, l);
2632 invalidate_and_set_dirty(mr, addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002633 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002634
2635 if (release_lock) {
2636 qemu_mutex_unlock_iothread();
2637 release_lock = false;
2638 }
2639
bellard13eb76e2004-01-24 15:23:36 +00002640 len -= l;
2641 buf += l;
2642 addr += l;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002643
2644 if (!len) {
2645 break;
2646 }
2647
2648 l = len;
2649 mr = address_space_translate(as, addr, &addr1, &l, true);
bellard13eb76e2004-01-24 15:23:36 +00002650 }
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002651
Peter Maydell3b643492015-04-26 16:49:23 +01002652 return result;
bellard13eb76e2004-01-24 15:23:36 +00002653}
bellard8df1cd02005-01-28 22:37:22 +00002654
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002655MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2656 const uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002657{
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002658 hwaddr l;
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002659 hwaddr addr1;
2660 MemoryRegion *mr;
2661 MemTxResult result = MEMTX_OK;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002662
2663 if (len > 0) {
2664 rcu_read_lock();
2665 l = len;
2666 mr = address_space_translate(as, addr, &addr1, &l, true);
2667 result = address_space_write_continue(as, addr, attrs, buf, len,
2668 addr1, l, mr);
2669 rcu_read_unlock();
2670 }
2671
2672 return result;
2673}
2674
2675/* Called within RCU critical section. */
2676MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
2677 MemTxAttrs attrs, uint8_t *buf,
2678 int len, hwaddr addr1, hwaddr l,
2679 MemoryRegion *mr)
2680{
2681 uint8_t *ptr;
2682 uint64_t val;
2683 MemTxResult result = MEMTX_OK;
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002684 bool release_lock = false;
2685
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002686 for (;;) {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002687 if (!memory_access_is_direct(mr, false)) {
2688 /* I/O case */
2689 release_lock |= prepare_mmio_access(mr);
2690 l = memory_access_size(mr, l, addr1);
2691 switch (l) {
2692 case 8:
2693 /* 64 bit read access */
2694 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2695 attrs);
2696 stq_p(buf, val);
2697 break;
2698 case 4:
2699 /* 32 bit read access */
2700 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2701 attrs);
2702 stl_p(buf, val);
2703 break;
2704 case 2:
2705 /* 16 bit read access */
2706 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2707 attrs);
2708 stw_p(buf, val);
2709 break;
2710 case 1:
2711 /* 8 bit read access */
2712 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2713 attrs);
2714 stb_p(buf, val);
2715 break;
2716 default:
2717 abort();
2718 }
2719 } else {
2720 /* RAM case */
Fam Zheng8e41fb62016-03-01 14:18:21 +08002721 ptr = qemu_get_ram_ptr(mr->ram_block,
2722 memory_region_get_ram_addr(mr) + addr1);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002723 memcpy(buf, ptr, l);
2724 }
2725
2726 if (release_lock) {
2727 qemu_mutex_unlock_iothread();
2728 release_lock = false;
2729 }
2730
2731 len -= l;
2732 buf += l;
2733 addr += l;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002734
2735 if (!len) {
2736 break;
2737 }
2738
2739 l = len;
2740 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002741 }
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002742
2743 return result;
2744}
2745
Paolo Bonzini3cc8f882015-12-09 10:34:13 +01002746MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
2747 MemTxAttrs attrs, uint8_t *buf, int len)
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002748{
2749 hwaddr l;
2750 hwaddr addr1;
2751 MemoryRegion *mr;
2752 MemTxResult result = MEMTX_OK;
2753
2754 if (len > 0) {
2755 rcu_read_lock();
2756 l = len;
2757 mr = address_space_translate(as, addr, &addr1, &l, false);
2758 result = address_space_read_continue(as, addr, attrs, buf, len,
2759 addr1, l, mr);
2760 rcu_read_unlock();
2761 }
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002762
2763 return result;
Avi Kivityac1970f2012-10-03 16:22:53 +02002764}
2765
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002766MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2767 uint8_t *buf, int len, bool is_write)
2768{
2769 if (is_write) {
2770 return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
2771 } else {
2772 return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
2773 }
2774}
Avi Kivityac1970f2012-10-03 16:22:53 +02002775
Avi Kivitya8170e52012-10-23 12:30:10 +02002776void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002777 int len, int is_write)
2778{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002779 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2780 buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002781}
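/*
 * Typical DMA-style use from a device model (sketch; desc_addr and the
 * AddressSpace are placeholders - a PCI device would normally use its own
 * bus-master address space rather than address_space_memory):
 *
 *     uint8_t desc[16];
 *     MemTxResult r = address_space_read(as, desc_addr,
 *                                        MEMTXATTRS_UNSPECIFIED,
 *                                        desc, sizeof(desc));
 *     if (r != MEMTX_OK) {
 *         the transaction failed; report a bus error to the guest
 *     }
 *
 * cpu_physical_memory_rw() above is the legacy wrapper that always targets
 * address_space_memory with unspecified attributes.
 */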
2782
Alexander Graf582b55a2013-12-11 14:17:44 +01002783enum write_rom_type {
2784 WRITE_DATA,
2785 FLUSH_CACHE,
2786};
2787
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002788static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
Alexander Graf582b55a2013-12-11 14:17:44 +01002789 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
bellardd0ecd2a2006-04-23 17:14:48 +00002790{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002791 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002792 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002793 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002794 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002795
Paolo Bonzini41063e12015-03-18 14:21:43 +01002796 rcu_read_lock();
bellardd0ecd2a2006-04-23 17:14:48 +00002797 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002798 l = len;
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002799 mr = address_space_translate(as, addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002800
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002801 if (!(memory_region_is_ram(mr) ||
2802 memory_region_is_romd(mr))) {
Paolo Bonzinib242e0e2015-07-04 00:24:51 +02002803 l = memory_access_size(mr, l, addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002804 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002805 addr1 += memory_region_get_ram_addr(mr);
bellardd0ecd2a2006-04-23 17:14:48 +00002806 /* ROM/RAM case */
Gonglei3655cb92016-02-20 10:35:20 +08002807 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
Alexander Graf582b55a2013-12-11 14:17:44 +01002808 switch (type) {
2809 case WRITE_DATA:
2810 memcpy(ptr, buf, l);
Paolo Bonzini845b6212015-03-23 11:45:53 +01002811 invalidate_and_set_dirty(mr, addr1, l);
Alexander Graf582b55a2013-12-11 14:17:44 +01002812 break;
2813 case FLUSH_CACHE:
2814 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2815 break;
2816 }
bellardd0ecd2a2006-04-23 17:14:48 +00002817 }
2818 len -= l;
2819 buf += l;
2820 addr += l;
2821 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002822 rcu_read_unlock();
bellardd0ecd2a2006-04-23 17:14:48 +00002823}
2824
Alexander Graf582b55a2013-12-11 14:17:44 +01002825/* used for ROM loading: can write to RAM and ROM */
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002826void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
Alexander Graf582b55a2013-12-11 14:17:44 +01002827 const uint8_t *buf, int len)
2828{
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002829 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
Alexander Graf582b55a2013-12-11 14:17:44 +01002830}
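/*
 * Example (sketch; bios_base and blob are placeholders): firmware loaders
 * use this helper because a plain address_space_write() to a ROM region is
 * discarded, while this path copies into the backing RAM and invalidates
 * any translated code for it:
 *
 *     cpu_physical_memory_write_rom(&address_space_memory, bios_base,
 *                                   blob, blob_size);
 */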
2831
2832void cpu_flush_icache_range(hwaddr start, int len)
2833{
2834 /*
2835 * This function should do the same thing as an icache flush that was
2836 * triggered from within the guest. For TCG we are always cache coherent,
2837 * so there is no need to flush anything. For KVM / Xen we need to flush
2838 * the host's instruction cache at least.
2839 */
2840 if (tcg_enabled()) {
2841 return;
2842 }
2843
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002844 cpu_physical_memory_write_rom_internal(&address_space_memory,
2845 start, NULL, len, FLUSH_CACHE);
Alexander Graf582b55a2013-12-11 14:17:44 +01002846}
2847
aliguori6d16c2f2009-01-22 16:59:11 +00002848typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002849 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002850 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002851 hwaddr addr;
2852 hwaddr len;
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002853 bool in_use;
aliguori6d16c2f2009-01-22 16:59:11 +00002854} BounceBuffer;
2855
2856static BounceBuffer bounce;
2857
aliguoriba223c22009-01-22 16:59:16 +00002858typedef struct MapClient {
Fam Zhenge95205e2015-03-16 17:03:37 +08002859 QEMUBH *bh;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002860 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002861} MapClient;
2862
Fam Zheng38e047b2015-03-16 17:03:35 +08002863QemuMutex map_client_list_lock;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002864static QLIST_HEAD(map_client_list, MapClient) map_client_list
2865 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002866
Fam Zhenge95205e2015-03-16 17:03:37 +08002867static void cpu_unregister_map_client_do(MapClient *client)
aliguoriba223c22009-01-22 16:59:16 +00002868{
Blue Swirl72cf2d42009-09-12 07:36:22 +00002869 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002870 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002871}
2872
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002873static void cpu_notify_map_clients_locked(void)
aliguoriba223c22009-01-22 16:59:16 +00002874{
2875 MapClient *client;
2876
Blue Swirl72cf2d42009-09-12 07:36:22 +00002877 while (!QLIST_EMPTY(&map_client_list)) {
2878 client = QLIST_FIRST(&map_client_list);
Fam Zhenge95205e2015-03-16 17:03:37 +08002879 qemu_bh_schedule(client->bh);
2880 cpu_unregister_map_client_do(client);
aliguoriba223c22009-01-22 16:59:16 +00002881 }
2882}
2883
Fam Zhenge95205e2015-03-16 17:03:37 +08002884void cpu_register_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002885{
2886 MapClient *client = g_malloc(sizeof(*client));
2887
Fam Zheng38e047b2015-03-16 17:03:35 +08002888 qemu_mutex_lock(&map_client_list_lock);
Fam Zhenge95205e2015-03-16 17:03:37 +08002889 client->bh = bh;
bellardd0ecd2a2006-04-23 17:14:48 +00002890 QLIST_INSERT_HEAD(&map_client_list, client, link);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002891 if (!atomic_read(&bounce.in_use)) {
2892 cpu_notify_map_clients_locked();
2893 }
Fam Zheng38e047b2015-03-16 17:03:35 +08002894 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002895}
2896
Fam Zheng38e047b2015-03-16 17:03:35 +08002897void cpu_exec_init_all(void)
2898{
2899 qemu_mutex_init(&ram_list.mutex);
Fam Zheng38e047b2015-03-16 17:03:35 +08002900 io_mem_init();
Paolo Bonzini680a4782015-11-02 09:23:52 +01002901 memory_map_init();
Fam Zheng38e047b2015-03-16 17:03:35 +08002902 qemu_mutex_init(&map_client_list_lock);
2903}
2904
Fam Zhenge95205e2015-03-16 17:03:37 +08002905void cpu_unregister_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002906{
Fam Zhenge95205e2015-03-16 17:03:37 +08002907 MapClient *client;
bellardd0ecd2a2006-04-23 17:14:48 +00002908
Fam Zhenge95205e2015-03-16 17:03:37 +08002909 qemu_mutex_lock(&map_client_list_lock);
2910 QLIST_FOREACH(client, &map_client_list, link) {
2911 if (client->bh == bh) {
2912 cpu_unregister_map_client_do(client);
2913 break;
2914 }
2915 }
2916 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002917}
2918
2919static void cpu_notify_map_clients(void)
2920{
Fam Zheng38e047b2015-03-16 17:03:35 +08002921 qemu_mutex_lock(&map_client_list_lock);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002922 cpu_notify_map_clients_locked();
Fam Zheng38e047b2015-03-16 17:03:35 +08002923 qemu_mutex_unlock(&map_client_list_lock);
aliguori6d16c2f2009-01-22 16:59:11 +00002924}
2925
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002926bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2927{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002928 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002929 hwaddr l, xlat;
2930
Paolo Bonzini41063e12015-03-18 14:21:43 +01002931 rcu_read_lock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002932 while (len > 0) {
2933 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002934 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2935 if (!memory_access_is_direct(mr, is_write)) {
2936 l = memory_access_size(mr, l, addr);
2937 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002938 return false;
2939 }
2940 }
2941
2942 len -= l;
2943 addr += l;
2944 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002945 rcu_read_unlock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002946 return true;
2947}
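/*
 * Example (sketch; window_base and window_len are placeholders): probing a
 * DMA window before starting a transfer, so the device can reject the
 * request instead of faulting half-way through:
 *
 *     if (!address_space_access_valid(as, window_base, window_len, true)) {
 *         reject the request;
 *     }
 */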
2948
aliguori6d16c2f2009-01-22 16:59:11 +00002949/* Map a physical memory region into a host virtual address.
2950 * May map a subset of the requested range, given by and returned in *plen.
2951 * May return NULL if resources needed to perform the mapping are exhausted.
2952 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002953 * Use cpu_register_map_client() to know when retrying the map operation is
2954 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002955 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002956void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002957 hwaddr addr,
2958 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002959 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002960{
Avi Kivitya8170e52012-10-23 12:30:10 +02002961 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002962 hwaddr done = 0;
2963 hwaddr l, xlat, base;
2964 MemoryRegion *mr, *this_mr;
2965 ram_addr_t raddr;
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01002966 void *ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00002967
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002968 if (len == 0) {
2969 return NULL;
2970 }
aliguori6d16c2f2009-01-22 16:59:11 +00002971
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002972 l = len;
Paolo Bonzini41063e12015-03-18 14:21:43 +01002973 rcu_read_lock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002974 mr = address_space_translate(as, addr, &xlat, &l, is_write);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002975
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002976 if (!memory_access_is_direct(mr, is_write)) {
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002977 if (atomic_xchg(&bounce.in_use, true)) {
Paolo Bonzini41063e12015-03-18 14:21:43 +01002978 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002979 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002980 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002981 /* Avoid unbounded allocations */
2982 l = MIN(l, TARGET_PAGE_SIZE);
2983 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002984 bounce.addr = addr;
2985 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002986
2987 memory_region_ref(mr);
2988 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002989 if (!is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002990 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2991 bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002992 }
aliguori6d16c2f2009-01-22 16:59:11 +00002993
Paolo Bonzini41063e12015-03-18 14:21:43 +01002994 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002995 *plen = l;
2996 return bounce.buffer;
2997 }
2998
2999 base = xlat;
3000 raddr = memory_region_get_ram_addr(mr);
3001
3002 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00003003 len -= l;
3004 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02003005 done += l;
3006 if (len == 0) {
3007 break;
3008 }
3009
3010 l = len;
3011 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
3012 if (this_mr != mr || xlat != base + done) {
3013 break;
3014 }
aliguori6d16c2f2009-01-22 16:59:11 +00003015 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02003016
Paolo Bonzinid3e71552013-06-28 17:33:29 +02003017 memory_region_ref(mr);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02003018 *plen = done;
Gonglei3655cb92016-02-20 10:35:20 +08003019 ptr = qemu_ram_ptr_length(mr->ram_block, raddr + base, plen);
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01003020 rcu_read_unlock();
3021
3022 return ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00003023}
3024
Avi Kivityac1970f2012-10-03 16:22:53 +02003025/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00003026 * Will also mark the memory as dirty if is_write == 1. access_len gives
3027 * the amount of memory that was actually read or written by the caller.
3028 */
Avi Kivitya8170e52012-10-23 12:30:10 +02003029void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
3030 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00003031{
3032 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02003033 MemoryRegion *mr;
3034 ram_addr_t addr1;
3035
3036 mr = qemu_ram_addr_from_host(buffer, &addr1);
3037 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00003038 if (is_write) {
Paolo Bonzini845b6212015-03-23 11:45:53 +01003039 invalidate_and_set_dirty(mr, addr1, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00003040 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003041 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003042 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003043 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02003044 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00003045 return;
3046 }
3047 if (is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01003048 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
3049 bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00003050 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00003051 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003052 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02003053 memory_region_unref(bounce.mr);
Fam Zhengc2cba0f2015-03-16 17:03:33 +08003054 atomic_mb_set(&bounce.in_use, false);
aliguoriba223c22009-01-22 16:59:16 +00003055 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00003056}
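/*
 * Combined usage sketch for the map/unmap API (illustrative; gpa, size,
 * data and retry_bh are placeholders). Mapping gives zero-copy access to
 * guest RAM; for MMIO it falls back to the single bounce buffer, so NULL
 * means "try again later" via cpu_register_map_client():
 *
 *     hwaddr len = size;
 *     void *host = address_space_map(as, gpa, &len, true);
 *     if (!host) {
 *         cpu_register_map_client(retry_bh);   // rerun this code from the BH
 *         return;
 *     }
 *     memcpy(host, data, len);                 // len may be less than size
 *     address_space_unmap(as, host, len, true, len);
 */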
bellardd0ecd2a2006-04-23 17:14:48 +00003057
Avi Kivitya8170e52012-10-23 12:30:10 +02003058void *cpu_physical_memory_map(hwaddr addr,
3059 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02003060 int is_write)
3061{
3062 return address_space_map(&address_space_memory, addr, plen, is_write);
3063}
3064
Avi Kivitya8170e52012-10-23 12:30:10 +02003065void cpu_physical_memory_unmap(void *buffer, hwaddr len,
3066 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02003067{
3068 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
3069}
3070
bellard8df1cd02005-01-28 22:37:22 +00003071/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003072static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
3073 MemTxAttrs attrs,
3074 MemTxResult *result,
3075 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003076{
bellard8df1cd02005-01-28 22:37:22 +00003077 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02003078 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003079 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003080 hwaddr l = 4;
3081 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003082 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003083 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003084
Paolo Bonzini41063e12015-03-18 14:21:43 +01003085 rcu_read_lock();
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003086 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003087 if (l < 4 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003088 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003089
bellard8df1cd02005-01-28 22:37:22 +00003090 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003091 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003092#if defined(TARGET_WORDS_BIGENDIAN)
3093 if (endian == DEVICE_LITTLE_ENDIAN) {
3094 val = bswap32(val);
3095 }
3096#else
3097 if (endian == DEVICE_BIG_ENDIAN) {
3098 val = bswap32(val);
3099 }
3100#endif
bellard8df1cd02005-01-28 22:37:22 +00003101 } else {
3102 /* RAM case */
Gonglei3655cb92016-02-20 10:35:20 +08003103 ptr = qemu_get_ram_ptr(mr->ram_block,
3104 (memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003105 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003106 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003107 switch (endian) {
3108 case DEVICE_LITTLE_ENDIAN:
3109 val = ldl_le_p(ptr);
3110 break;
3111 case DEVICE_BIG_ENDIAN:
3112 val = ldl_be_p(ptr);
3113 break;
3114 default:
3115 val = ldl_p(ptr);
3116 break;
3117 }
Peter Maydell50013112015-04-26 16:49:24 +01003118 r = MEMTX_OK;
3119 }
3120 if (result) {
3121 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003122 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003123 if (release_lock) {
3124 qemu_mutex_unlock_iothread();
3125 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003126 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003127 return val;
3128}
3129
Peter Maydell50013112015-04-26 16:49:24 +01003130uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
3131 MemTxAttrs attrs, MemTxResult *result)
3132{
3133 return address_space_ldl_internal(as, addr, attrs, result,
3134 DEVICE_NATIVE_ENDIAN);
3135}
3136
3137uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
3138 MemTxAttrs attrs, MemTxResult *result)
3139{
3140 return address_space_ldl_internal(as, addr, attrs, result,
3141 DEVICE_LITTLE_ENDIAN);
3142}
3143
3144uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
3145 MemTxAttrs attrs, MemTxResult *result)
3146{
3147 return address_space_ldl_internal(as, addr, attrs, result,
3148 DEVICE_BIG_ENDIAN);
3149}
3150
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003151uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003152{
Peter Maydell50013112015-04-26 16:49:24 +01003153 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003154}
3155
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003156uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003157{
Peter Maydell50013112015-04-26 16:49:24 +01003158 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003159}
3160
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003161uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003162{
Peter Maydell50013112015-04-26 16:49:24 +01003163 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003164}
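/*
 * Example (sketch; desc_addr is a placeholder): device code that needs a
 * fixed-endian guest-physical load can use the explicit variants, e.g. for
 * a little-endian descriptor word:
 *
 *     uint32_t w = ldl_le_phys(&address_space_memory, desc_addr);
 *
 * The address_space_ldl*() forms additionally return a MemTxResult through
 * their last argument, letting callers detect bus errors.
 */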
3165
bellard84b7b8e2005-11-28 21:19:04 +00003166/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003167static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
3168 MemTxAttrs attrs,
3169 MemTxResult *result,
3170 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00003171{
bellard84b7b8e2005-11-28 21:19:04 +00003172 uint8_t *ptr;
3173 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003174 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003175 hwaddr l = 8;
3176 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003177 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003178 bool release_lock = false;
bellard84b7b8e2005-11-28 21:19:04 +00003179
Paolo Bonzini41063e12015-03-18 14:21:43 +01003180 rcu_read_lock();
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003181 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003182 false);
3183 if (l < 8 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003184 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003185
bellard84b7b8e2005-11-28 21:19:04 +00003186 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003187 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
Paolo Bonzini968a5622013-05-24 17:58:37 +02003188#if defined(TARGET_WORDS_BIGENDIAN)
3189 if (endian == DEVICE_LITTLE_ENDIAN) {
3190 val = bswap64(val);
3191 }
3192#else
3193 if (endian == DEVICE_BIG_ENDIAN) {
3194 val = bswap64(val);
3195 }
3196#endif
bellard84b7b8e2005-11-28 21:19:04 +00003197 } else {
3198 /* RAM case */
Gonglei3655cb92016-02-20 10:35:20 +08003199 ptr = qemu_get_ram_ptr(mr->ram_block,
3200 (memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003201 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003202 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003203 switch (endian) {
3204 case DEVICE_LITTLE_ENDIAN:
3205 val = ldq_le_p(ptr);
3206 break;
3207 case DEVICE_BIG_ENDIAN:
3208 val = ldq_be_p(ptr);
3209 break;
3210 default:
3211 val = ldq_p(ptr);
3212 break;
3213 }
Peter Maydell50013112015-04-26 16:49:24 +01003214 r = MEMTX_OK;
3215 }
3216 if (result) {
3217 *result = r;
bellard84b7b8e2005-11-28 21:19:04 +00003218 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003219 if (release_lock) {
3220 qemu_mutex_unlock_iothread();
3221 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003222 rcu_read_unlock();
bellard84b7b8e2005-11-28 21:19:04 +00003223 return val;
3224}
3225
Peter Maydell50013112015-04-26 16:49:24 +01003226uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
3227 MemTxAttrs attrs, MemTxResult *result)
3228{
3229 return address_space_ldq_internal(as, addr, attrs, result,
3230 DEVICE_NATIVE_ENDIAN);
3231}
3232
3233uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
3234 MemTxAttrs attrs, MemTxResult *result)
3235{
3236 return address_space_ldq_internal(as, addr, attrs, result,
3237 DEVICE_LITTLE_ENDIAN);
3238}
3239
3240uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
3241 MemTxAttrs attrs, MemTxResult *result)
3242{
3243 return address_space_ldq_internal(as, addr, attrs, result,
3244 DEVICE_BIG_ENDIAN);
3245}
3246
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003247uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003248{
Peter Maydell50013112015-04-26 16:49:24 +01003249 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003250}
3251
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003252uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003253{
Peter Maydell50013112015-04-26 16:49:24 +01003254 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003255}
3256
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003257uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003258{
Peter Maydell50013112015-04-26 16:49:24 +01003259 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003260}
3261
bellardaab33092005-10-30 20:48:42 +00003262/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003263uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
3264 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003265{
3266 uint8_t val;
Peter Maydell50013112015-04-26 16:49:24 +01003267 MemTxResult r;
3268
3269 r = address_space_rw(as, addr, attrs, &val, 1, 0);
3270 if (result) {
3271 *result = r;
3272 }
bellardaab33092005-10-30 20:48:42 +00003273 return val;
3274}
3275
Peter Maydell50013112015-04-26 16:49:24 +01003276uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
3277{
3278 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3279}
3280
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003281/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003282static inline uint32_t address_space_lduw_internal(AddressSpace *as,
3283 hwaddr addr,
3284 MemTxAttrs attrs,
3285 MemTxResult *result,
3286 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003287{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003288 uint8_t *ptr;
3289 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003290 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003291 hwaddr l = 2;
3292 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003293 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003294 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003295
Paolo Bonzini41063e12015-03-18 14:21:43 +01003296 rcu_read_lock();
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003297 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003298 false);
3299 if (l < 2 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003300 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003301
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003302 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003303 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003304#if defined(TARGET_WORDS_BIGENDIAN)
3305 if (endian == DEVICE_LITTLE_ENDIAN) {
3306 val = bswap16(val);
3307 }
3308#else
3309 if (endian == DEVICE_BIG_ENDIAN) {
3310 val = bswap16(val);
3311 }
3312#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003313 } else {
3314 /* RAM case */
Gonglei3655cb92016-02-20 10:35:20 +08003315 ptr = qemu_get_ram_ptr(mr->ram_block,
3316 (memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003317 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003318 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003319 switch (endian) {
3320 case DEVICE_LITTLE_ENDIAN:
3321 val = lduw_le_p(ptr);
3322 break;
3323 case DEVICE_BIG_ENDIAN:
3324 val = lduw_be_p(ptr);
3325 break;
3326 default:
3327 val = lduw_p(ptr);
3328 break;
3329 }
Peter Maydell50013112015-04-26 16:49:24 +01003330 r = MEMTX_OK;
3331 }
3332 if (result) {
3333 *result = r;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003334 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003335 if (release_lock) {
3336 qemu_mutex_unlock_iothread();
3337 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003338 rcu_read_unlock();
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003339 return val;
bellardaab33092005-10-30 20:48:42 +00003340}
3341
Peter Maydell50013112015-04-26 16:49:24 +01003342uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
3343 MemTxAttrs attrs, MemTxResult *result)
3344{
3345 return address_space_lduw_internal(as, addr, attrs, result,
3346 DEVICE_NATIVE_ENDIAN);
3347}
3348
3349uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
3350 MemTxAttrs attrs, MemTxResult *result)
3351{
3352 return address_space_lduw_internal(as, addr, attrs, result,
3353 DEVICE_LITTLE_ENDIAN);
3354}
3355
3356uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
3357 MemTxAttrs attrs, MemTxResult *result)
3358{
3359 return address_space_lduw_internal(as, addr, attrs, result,
3360 DEVICE_BIG_ENDIAN);
3361}
3362
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003363uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003364{
Peter Maydell50013112015-04-26 16:49:24 +01003365 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003366}
3367
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003368uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003369{
Peter Maydell50013112015-04-26 16:49:24 +01003370 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003371}
3372
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003373uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003374{
Peter Maydell50013112015-04-26 16:49:24 +01003375 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003376}
3377
bellard8df1cd02005-01-28 22:37:22 +00003378/* warning: addr must be aligned. The RAM page is not marked as dirty
3379 and the code inside is not invalidated. It is useful if the dirty
3380 bits are used to track modified PTEs */
Peter Maydell50013112015-04-26 16:49:24 +01003381void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3382 MemTxAttrs attrs, MemTxResult *result)
bellard8df1cd02005-01-28 22:37:22 +00003383{
bellard8df1cd02005-01-28 22:37:22 +00003384 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003385 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003386 hwaddr l = 4;
3387 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003388 MemTxResult r;
Paolo Bonzini845b6212015-03-23 11:45:53 +01003389 uint8_t dirty_log_mask;
Jan Kiszka4840f102015-06-18 18:47:22 +02003390 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003391
Paolo Bonzini41063e12015-03-18 14:21:43 +01003392 rcu_read_lock();
Edgar E. Iglesias2198a122013-11-28 10:13:41 +01003393 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003394 true);
3395 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003396 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003397
Peter Maydell50013112015-04-26 16:49:24 +01003398 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003399 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003400 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
Gonglei3655cb92016-02-20 10:35:20 +08003401 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
bellard8df1cd02005-01-28 22:37:22 +00003402 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003403
Paolo Bonzini845b6212015-03-23 11:45:53 +01003404 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3405 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
Paolo Bonzini58d27072015-03-23 11:56:01 +01003406 cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
Peter Maydell50013112015-04-26 16:49:24 +01003407 r = MEMTX_OK;
3408 }
3409 if (result) {
3410 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003411 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003412 if (release_lock) {
3413 qemu_mutex_unlock_iothread();
3414 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003415 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003416}
3417
Peter Maydell50013112015-04-26 16:49:24 +01003418void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3419{
3420 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3421}
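/*
 * Example (sketch; pte_addr and PG_DIRTY_MASK stand in for target-specific
 * names): a softmmu page-table walker that sets accessed/dirty bits in a
 * guest PTE uses the _notdirty variant so the store does not flag the page
 * containing the PTE as holding modified code:
 *
 *     stl_phys_notdirty(cs->as, pte_addr, pte | PG_DIRTY_MASK);
 */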
3422
bellard8df1cd02005-01-28 22:37:22 +00003423/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003424static inline void address_space_stl_internal(AddressSpace *as,
3425 hwaddr addr, uint32_t val,
3426 MemTxAttrs attrs,
3427 MemTxResult *result,
3428 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003429{
bellard8df1cd02005-01-28 22:37:22 +00003430 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003431 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003432 hwaddr l = 4;
3433 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003434 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003435 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003436
Paolo Bonzini41063e12015-03-18 14:21:43 +01003437 rcu_read_lock();
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003438 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003439 true);
3440 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003441 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003442
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003443#if defined(TARGET_WORDS_BIGENDIAN)
3444 if (endian == DEVICE_LITTLE_ENDIAN) {
3445 val = bswap32(val);
3446 }
3447#else
3448 if (endian == DEVICE_BIG_ENDIAN) {
3449 val = bswap32(val);
3450 }
3451#endif
Peter Maydell50013112015-04-26 16:49:24 +01003452 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003453 } else {
bellard8df1cd02005-01-28 22:37:22 +00003454 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003455 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
Gonglei3655cb92016-02-20 10:35:20 +08003456 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003457 switch (endian) {
3458 case DEVICE_LITTLE_ENDIAN:
3459 stl_le_p(ptr, val);
3460 break;
3461 case DEVICE_BIG_ENDIAN:
3462 stl_be_p(ptr, val);
3463 break;
3464 default:
3465 stl_p(ptr, val);
3466 break;
3467 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003468 invalidate_and_set_dirty(mr, addr1, 4);
Peter Maydell50013112015-04-26 16:49:24 +01003469 r = MEMTX_OK;
bellard8df1cd02005-01-28 22:37:22 +00003470 }
Peter Maydell50013112015-04-26 16:49:24 +01003471 if (result) {
3472 *result = r;
3473 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003474 if (release_lock) {
3475 qemu_mutex_unlock_iothread();
3476 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003477 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003478}
3479
3480void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3481 MemTxAttrs attrs, MemTxResult *result)
3482{
3483 address_space_stl_internal(as, addr, val, attrs, result,
3484 DEVICE_NATIVE_ENDIAN);
3485}
3486
3487void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3488 MemTxAttrs attrs, MemTxResult *result)
3489{
3490 address_space_stl_internal(as, addr, val, attrs, result,
3491 DEVICE_LITTLE_ENDIAN);
3492}
3493
3494void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3495 MemTxAttrs attrs, MemTxResult *result)
3496{
3497 address_space_stl_internal(as, addr, val, attrs, result,
3498 DEVICE_BIG_ENDIAN);
bellard8df1cd02005-01-28 22:37:22 +00003499}
3500
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003501void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003502{
Peter Maydell50013112015-04-26 16:49:24 +01003503 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003504}
3505
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003506void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003507{
Peter Maydell50013112015-04-26 16:49:24 +01003508 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003509}
3510
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003511void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003512{
Peter Maydell50013112015-04-26 16:49:24 +01003513 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003514}
3515
bellardaab33092005-10-30 20:48:42 +00003516/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003517void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3518 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003519{
3520 uint8_t v = val;
Peter Maydell50013112015-04-26 16:49:24 +01003521 MemTxResult r;
3522
3523 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3524 if (result) {
3525 *result = r;
3526 }
3527}
3528
3529void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3530{
3531 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003532}
3533
/* warning: addr must be aligned */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

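/*
 * Unlike the 16- and 32-bit helpers, the 64-bit stores below have no
 * direct-RAM fast path yet (hence the XXX below): the value is
 * byte-swapped in place as needed and then handed to address_space_rw()
 * as a plain 8-byte write.
 */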
/* XXX: optimize */
void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = tswap64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_le64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_be64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

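/*
 * cpu_memory_rw_debug() walks the guest-virtual range page by page:
 * each page is translated with cpu_get_phys_page_attrs_debug(), the
 * address space is chosen from the returned attributes, and the chunk
 * is then transferred through that address space.  Writes go through
 * cpu_physical_memory_write_rom() so that a debugger (e.g. the gdbstub)
 * can also patch ROM-backed code.
 */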
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        int asidx;
        MemTxAttrs attrs;

        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
        asidx = cpu_asidx_from_attrs(cpu, attrs);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
                                          phys_addr, buf, l);
        } else {
            address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
                             MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

/*
 * Allows code that needs to deal with migration bitmaps etc. to still be
 * built target-independent.
 */
size_t qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}

#endif

/*
 * A helper function for the _utterly broken_ virtio device model to find out
 * if it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#ifndef CONFIG_USER_ONLY
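/*
 * True if an access to the given guest-physical address would be handled
 * as MMIO rather than as direct RAM (neither RAM nor a ROM device backs
 * it).
 */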
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}

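/*
 * Iterate over all RAMBlocks under the RCU read lock, handing each
 * block's idstr, host pointer, offset and used length to @func; stop at
 * and return the first non-zero value @func returns, or 0 otherwise.
 */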
int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);
        if (ret) {
            break;
        }
    }
    rcu_read_unlock();
    return ret;
}
#endif