/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#ifndef _WIN32
#include <sys/mman.h>
#endif

#include "qemu/cutils.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "hw/qdev-core.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#include "hw/xen/xen.h"
#endif
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "hw/hw.h"
#include "exec/memory.h"
#include "exec/ioport.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "exec/log.h"

#include "migration/vmstate.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits to skip to the next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
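
/* skip (6 bits) and ptr (26 bits) pack each entry into a single 32-bit word.
 * PHYS_MAP_NODE_NIL is simply the largest value the 26-bit ptr field can
 * hold, reserved to mean "no node / nothing mapped here".
 */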

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
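
/* For example, with ADDR_SPACE_BITS == 64, P_L2_BITS == 9 and a 4 KiB target
 * page size (TARGET_PAGE_BITS == 12), this works out to
 * ((64 - 12 - 1) / 9) + 1 = 6 levels of page table.
 */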

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
253
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +0200254/* Compact a non leaf page entry. Simply detect that the entry has a single child,
255 * and update our entry so we can skip it and go directly to the destination.
256 */
257static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
258{
259 unsigned valid_ptr = P_L2_SIZE;
260 int valid = 0;
261 PhysPageEntry *p;
262 int i;
263
264 if (lp->ptr == PHYS_MAP_NODE_NIL) {
265 return;
266 }
267
268 p = nodes[lp->ptr];
269 for (i = 0; i < P_L2_SIZE; i++) {
270 if (p[i].ptr == PHYS_MAP_NODE_NIL) {
271 continue;
272 }
273
274 valid_ptr = i;
275 valid++;
276 if (p[i].skip) {
277 phys_page_compact(&p[i], nodes, compacted);
278 }
279 }
280
281 /* We can only compress if there's only one child. */
282 if (valid != 1) {
283 return;
284 }
285
286 assert(valid_ptr < P_L2_SIZE);
287
288 /* Don't compress if it won't fit in the # of bits we have. */
289 if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
290 return;
291 }
292
293 lp->ptr = p[valid_ptr].ptr;
294 if (!p[valid_ptr].skip) {
295 /* If our only child is a leaf, make this a leaf. */
296 /* By design, we should have made this node a leaf to begin with so we
297 * should never reach here.
298 * But since it's so simple to handle this, let's do it just in case we
299 * change this rule.
300 */
301 lp->skip = 0;
302 } else {
303 lp->skip += p[valid_ptr].skip;
304 }
305}
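
/* Example: if this entry skips one level and its only child skips two more,
 * the compacted entry ends up with skip == 3 and points straight at the
 * grandchild's target, so phys_page_find() can drop three levels in a single
 * step.
 */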

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return section->size.hi ||
           range_covers_byte(section->offset_within_address_space,
                             section->size.lo, addr);
}

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = atomic_read(&d->mru_section);
    subpage_t *subpage;
    bool update;

    if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
        section_covers_addr(section, addr)) {
        update = false;
    } else {
        section = phys_page_find(d->phys_map, addr, d->map.nodes,
                                 d->map.sections);
        update = true;
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    if (update) {
        atomic_set(&d->mru_section, section);
    }
    return section;
}
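
/* The mru_section cache above means that repeated lookups hitting the same
 * section (the common case for sequential guest accesses) skip the
 * phys_page_find() radix-tree walk entirely; only a miss pays for the walk
 * and then refreshes the cache with atomic_set().
 */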

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}
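
/* Each iteration of the loop above resolves one layer of translation: the
 * section is looked up in the current address space, and if it is backed by
 * an IOMMU the returned IOTLB entry redirects the access (possibly with a
 * reduced length) into iotlb.target_as, until a non-IOMMU region or a
 * permission failure ends the walk.
 */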

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};
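
/* The two subsections above are only put on the wire when their .needed
 * callback returns true, so migration streams from QEMU versions that lack
 * them still load cleanly.
 */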

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
#endif

#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

static int cpu_get_free_index(Error **errp)
{
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
                   MAX_CPUMASK_BITS);
        return -1;
    }

    bitmap_set(cpu_index_map, cpu, 1);
    return cpu;
}

static void cpu_release_index(CPUState *cpu)
{
    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
}
#else

static int cpu_get_free_index(Error **errp)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

static void cpu_release_index(CPUState *cpu)
{
    return;
}
#endif
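
/* Two allocation strategies: the softmmu build hands out cpu_index values
 * from a fixed bitmap (capped at MAX_CPUMASK_BITS) and can recycle them when
 * a CPU goes away, while the user-mode build simply numbers CPUs by counting
 * the current list and never reuses an index.
 */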

void cpu_exec_exit(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }

    QTAILQ_REMOVE(&cpus, cpu, node);
    cpu_release_index(cpu);
    cpu->cpu_index = -1;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif

    if (cc->vmsd != NULL) {
        vmstate_unregister(NULL, cc->vmsd, cpu);
    }
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
    }
}

void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    Error *local_err = NULL;

    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();

    /* This is a softmmu CPU object, so create a property for it
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
                             (Object **)&cpu->memory,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));
#endif

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    (void) cc;
    cpu_list_unlock();
#else
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
    }
#endif
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    MemTxAttrs attrs;
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)

{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
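
/* Worked example: a watchpoint at [0x1000, 0x1003] (vaddr 0x1000, len 4) and
 * an access covering [0x1002, 0x1005] overlap, so the test above returns
 * true; an access at [0x1004, 0x1007] does not, since its start is past
 * wpend.
 */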

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *                  rcu_read_lock()
     *                  read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *                  rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    bool dirty = false;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
                                              offset, num);
        page += num;
    }

    rcu_read_unlock();

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}
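
/* The dirty bitmap for each client is split into DIRTY_MEMORY_BLOCK_SIZE
 * chunks; the loop above clears each chunk with an atomic bitmap operation
 * while holding only the RCU read lock, and the TLBs are flushed afterwards
 * only if some page really was dirty and TCG is in use.
 */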

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
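
/* The returned iotlb value encodes two cases at once: for RAM it is the
 * ram address of the page ORed with one of the fixed PHYS_SECTION_* indices
 * in the low bits (which is why phys_section_add() below asserts that the
 * number of sections stays under TARGET_PAGE_SIZE), while for MMIO it is the
 * section's index within the dispatch map plus the offset into the region.
 */
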
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}
1125
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001126static uint16_t phys_section_add(PhysPageMap *map,
1127 MemoryRegionSection *section)
Avi Kivity5312bd82012-02-12 18:32:55 +02001128{
Paolo Bonzini68f3f652013-05-07 11:30:23 +02001129 /* The physical section number is ORed with a page-aligned
1130 * pointer to produce the iotlb entries. Thus it should
1131 * never overflow into the page-aligned value.
1132 */
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001133 assert(map->sections_nb < TARGET_PAGE_SIZE);
Paolo Bonzini68f3f652013-05-07 11:30:23 +02001134
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001135 if (map->sections_nb == map->sections_nb_alloc) {
1136 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
1137 map->sections = g_renew(MemoryRegionSection, map->sections,
1138 map->sections_nb_alloc);
Avi Kivity5312bd82012-02-12 18:32:55 +02001139 }
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001140 map->sections[map->sections_nb] = *section;
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02001141 memory_region_ref(section->mr);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001142 return map->sections_nb++;
Avi Kivity5312bd82012-02-12 18:32:55 +02001143}
1144
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001145static void phys_section_destroy(MemoryRegion *mr)
1146{
Don Slutz55b4e802015-11-30 17:11:04 -05001147 bool have_sub_page = mr->subpage;
1148
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02001149 memory_region_unref(mr);
1150
Don Slutz55b4e802015-11-30 17:11:04 -05001151 if (have_sub_page) {
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001152 subpage_t *subpage = container_of(mr, subpage_t, iomem);
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001153 object_unref(OBJECT(&subpage->iomem));
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001154 g_free(subpage);
1155 }
1156}
1157
Paolo Bonzini60926662013-05-29 12:30:26 +02001158static void phys_sections_free(PhysPageMap *map)
Avi Kivity5312bd82012-02-12 18:32:55 +02001159{
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001160 while (map->sections_nb > 0) {
1161 MemoryRegionSection *section = &map->sections[--map->sections_nb];
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001162 phys_section_destroy(section->mr);
1163 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001164 g_free(map->sections);
1165 g_free(map->nodes);
Avi Kivity5312bd82012-02-12 18:32:55 +02001166}
1167
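/* Register a section that does not cover a whole target page.  The page is
 * backed by a subpage_t container region, and the section is recorded in
 * that page's sub_section[] table so accesses are dispatched to the right
 * sub-region at run time.
 */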
Avi Kivityac1970f2012-10-03 16:22:53 +02001168static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001169{
1170 subpage_t *subpage;
Avi Kivitya8170e52012-10-23 12:30:10 +02001171 hwaddr base = section->offset_within_address_space
Avi Kivity0f0cb162012-02-13 17:14:32 +02001172 & TARGET_PAGE_MASK;
Michael S. Tsirkin97115a82013-11-13 20:08:19 +02001173 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001174 d->map.nodes, d->map.sections);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001175 MemoryRegionSection subsection = {
1176 .offset_within_address_space = base,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001177 .size = int128_make64(TARGET_PAGE_SIZE),
Avi Kivity0f0cb162012-02-13 17:14:32 +02001178 };
Avi Kivitya8170e52012-10-23 12:30:10 +02001179 hwaddr start, end;
Avi Kivity0f0cb162012-02-13 17:14:32 +02001180
Avi Kivityf3705d52012-03-08 16:16:34 +02001181 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001182
Avi Kivityf3705d52012-03-08 16:16:34 +02001183 if (!(existing->mr->subpage)) {
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001184 subpage = subpage_init(d->as, base);
Edgar E. Iglesias3be91e82013-11-07 18:42:51 +01001185 subsection.address_space = d->as;
Avi Kivity0f0cb162012-02-13 17:14:32 +02001186 subsection.mr = &subpage->iomem;
Avi Kivityac1970f2012-10-03 16:22:53 +02001187 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001188 phys_section_add(&d->map, &subsection));
Avi Kivity0f0cb162012-02-13 17:14:32 +02001189 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02001190 subpage = container_of(existing->mr, subpage_t, iomem);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001191 }
1192 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001193 end = start + int128_get64(section->size) - 1;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001194 subpage_register(subpage, start, end,
1195 phys_section_add(&d->map, section));
Avi Kivity0f0cb162012-02-13 17:14:32 +02001196}
1197
1198
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001199static void register_multipage(AddressSpaceDispatch *d,
1200 MemoryRegionSection *section)
bellard33417e72003-08-10 21:47:01 +00001201{
Avi Kivitya8170e52012-10-23 12:30:10 +02001202 hwaddr start_addr = section->offset_within_address_space;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001203 uint16_t section_index = phys_section_add(&d->map, section);
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001204 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1205 TARGET_PAGE_BITS));
Avi Kivitydd811242012-01-02 12:17:03 +02001206
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001207 assert(num_pages);
1208 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
bellard33417e72003-08-10 21:47:01 +00001209}
1210
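/* Add @section to the dispatch table under construction for the listener's
 * address space.  An unaligned head, an unaligned tail, and any piece
 * smaller than a page go through register_subpage(); the page-aligned
 * middle goes through register_multipage().
 */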
Avi Kivityac1970f2012-10-03 16:22:53 +02001211static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001212{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001213 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini00752702013-05-29 12:13:54 +02001214 AddressSpaceDispatch *d = as->next_dispatch;
Paolo Bonzini99b9cc02013-05-27 13:18:01 +02001215 MemoryRegionSection now = *section, remain = *section;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001216 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001217
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001218 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1219 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1220 - now.offset_within_address_space;
1221
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001222 now.size = int128_min(int128_make64(left), now.size);
Avi Kivityac1970f2012-10-03 16:22:53 +02001223 register_subpage(d, &now);
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001224 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001225 now.size = int128_zero();
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001226 }
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001227 while (int128_ne(remain.size, now.size)) {
1228 remain.size = int128_sub(remain.size, now.size);
1229 remain.offset_within_address_space += int128_get64(now.size);
1230 remain.offset_within_region += int128_get64(now.size);
Tyler Hall69b67642012-07-25 18:45:04 -04001231 now = remain;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001232 if (int128_lt(remain.size, page_size)) {
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001233 register_subpage(d, &now);
Hu Tao88266242013-08-29 18:21:16 +08001234 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001235 now.size = page_size;
Avi Kivityac1970f2012-10-03 16:22:53 +02001236 register_subpage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001237 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001238 now.size = int128_and(now.size, int128_neg(page_size));
Avi Kivityac1970f2012-10-03 16:22:53 +02001239 register_multipage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001240 }
Avi Kivity0f0cb162012-02-13 17:14:32 +02001241 }
1242}
1243
Sheng Yang62a27442010-01-26 19:21:16 +08001244void qemu_flush_coalesced_mmio_buffer(void)
1245{
1246 if (kvm_enabled())
1247 kvm_flush_coalesced_mmio_buffer();
1248}
1249
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001250void qemu_mutex_lock_ramlist(void)
1251{
1252 qemu_mutex_lock(&ram_list.mutex);
1253}
1254
1255void qemu_mutex_unlock_ramlist(void)
1256{
1257 qemu_mutex_unlock(&ram_list.mutex);
1258}
1259
Markus Armbrustere1e84ba2013-07-31 15:11:10 +02001260#ifdef __linux__
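/* Back @block's guest RAM with a file (typically on hugetlbfs).  @path may
 * name an existing file, a file to be created, or a directory in which a
 * temporary backing file is created.  Returns the mapped area, or NULL
 * with @errp set on failure.
 */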
Alex Williamson04b16652010-07-02 11:13:17 -06001261static void *file_ram_alloc(RAMBlock *block,
1262 ram_addr_t memory,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001263 const char *path,
1264 Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001265{
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001266 bool unlink_on_error = false;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001267 char *filename;
Peter Feiner8ca761f2013-03-04 13:54:25 -05001268 char *sanitized_name;
1269 char *c;
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +03001270 void *area;
Paolo Bonzini5c3ece72016-03-17 15:53:13 +01001271 int fd = -1;
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001272 int64_t page_size;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001273
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001274 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1275 error_setg(errp,
1276 "host lacks kvm mmu notifiers, -mem-path unsupported");
1277 return NULL;
1278 }
1279
1280 for (;;) {
1281 fd = open(path, O_RDWR);
1282 if (fd >= 0) {
1283 /* @path names an existing file, use it */
1284 break;
1285 }
1286 if (errno == ENOENT) {
1287 /* @path names a file that doesn't exist, create it */
1288 fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
1289 if (fd >= 0) {
1290 unlink_on_error = true;
1291 break;
1292 }
1293 } else if (errno == EISDIR) {
1294 /* @path names a directory, create a file there */
1295 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1296 sanitized_name = g_strdup(memory_region_name(block->mr));
1297 for (c = sanitized_name; *c != '\0'; c++) {
1298 if (*c == '/') {
1299 *c = '_';
1300 }
1301 }
1302
1303 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1304 sanitized_name);
1305 g_free(sanitized_name);
1306
1307 fd = mkstemp(filename);
1308 if (fd >= 0) {
1309 unlink(filename);
1310 g_free(filename);
1311 break;
1312 }
1313 g_free(filename);
1314 }
1315 if (errno != EEXIST && errno != EINTR) {
1316 error_setg_errno(errp, errno,
1317 "can't open backing store %s for guest RAM",
1318 path);
1319 goto error;
1320 }
1321 /*
1322 * Try again on EINTR and EEXIST. The latter happens when
1323 * something else creates the file between our two open().
1324 */
1325 }
1326
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001327 page_size = qemu_fd_getpagesize(fd);
Dominik Dingeld2f39ad2016-04-25 13:55:38 +02001328 block->mr->align = MAX(page_size, QEMU_VMALLOC_ALIGN);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001329
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001330 if (memory < page_size) {
Hu Tao557529d2014-09-09 13:28:00 +08001331 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001332 "or larger than page size 0x%" PRIx64,
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001333 memory, page_size);
Hu Tao557529d2014-09-09 13:28:00 +08001334 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001335 }
1336
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001337 memory = ROUND_UP(memory, page_size);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001338
1339 /*
1340 * ftruncate is not supported by hugetlbfs in older
1341 * hosts, so don't bother bailing out on errors.
1342 * If anything goes wrong with it under other filesystems,
1343 * mmap will fail.
1344 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001345 if (ftruncate(fd, memory)) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001346 perror("ftruncate");
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001347 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001348
Dominik Dingeld2f39ad2016-04-25 13:55:38 +02001349 area = qemu_ram_mmap(fd, memory, block->mr->align,
1350 block->flags & RAM_SHARED);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001351 if (area == MAP_FAILED) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001352 error_setg_errno(errp, errno,
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001353 "unable to map backing store for guest RAM");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001354 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001355 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001356
1357 if (mem_prealloc) {
Paolo Bonzini38183312014-05-14 17:43:21 +08001358 os_mem_prealloc(fd, area, memory);
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001359 }
1360
Alex Williamson04b16652010-07-02 11:13:17 -06001361 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001362 return area;
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001363
1364error:
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001365 if (unlink_on_error) {
1366 unlink(path);
1367 }
Paolo Bonzini5c3ece72016-03-17 15:53:13 +01001368 if (fd != -1) {
1369 close(fd);
1370 }
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001371 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001372}
1373#endif
1374
Mike Day0dc3f442013-09-05 14:41:35 -04001375/* Called with the ramlist lock held. */
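/* Choose an offset for a new block by scanning the existing blocks and
 * picking the smallest gap that still fits @size, keeping the ram_addr_t
 * space densely packed.
 */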
Alex Williamsond17b5282010-06-25 11:08:38 -06001376static ram_addr_t find_ram_offset(ram_addr_t size)
1377{
Alex Williamson04b16652010-07-02 11:13:17 -06001378 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001379 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001380
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001381 assert(size != 0); /* it would hand out the same offset multiple times */
1382
Mike Day0dc3f442013-09-05 14:41:35 -04001383 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
Alex Williamson04b16652010-07-02 11:13:17 -06001384 return 0;
Mike Day0d53d9f2015-01-21 13:45:24 +01001385 }
Alex Williamson04b16652010-07-02 11:13:17 -06001386
Mike Day0dc3f442013-09-05 14:41:35 -04001387 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001388 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001389
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001390 end = block->offset + block->max_length;
Alex Williamson04b16652010-07-02 11:13:17 -06001391
Mike Day0dc3f442013-09-05 14:41:35 -04001392 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001393 if (next_block->offset >= end) {
1394 next = MIN(next, next_block->offset);
1395 }
1396 }
1397 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001398 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001399 mingap = next - end;
1400 }
1401 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001402
1403 if (offset == RAM_ADDR_MAX) {
1404 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1405 (uint64_t)size);
1406 abort();
1407 }
1408
Alex Williamson04b16652010-07-02 11:13:17 -06001409 return offset;
1410}
1411
Juan Quintela652d7ec2012-07-20 10:37:54 +02001412ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001413{
Alex Williamsond17b5282010-06-25 11:08:38 -06001414 RAMBlock *block;
1415 ram_addr_t last = 0;
1416
Mike Day0dc3f442013-09-05 14:41:35 -04001417 rcu_read_lock();
1418 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001419 last = MAX(last, block->offset + block->max_length);
Mike Day0d53d9f2015-01-21 13:45:24 +01001420 }
Mike Day0dc3f442013-09-05 14:41:35 -04001421 rcu_read_unlock();
Alex Williamsond17b5282010-06-25 11:08:38 -06001422 return last;
1423}
1424
Jason Baronddb97f12012-08-02 15:44:16 -04001425static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1426{
1427 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001428
1429 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
Marcel Apfelbaum47c8ca52015-02-04 17:43:54 +02001430 if (!machine_dump_guest_core(current_machine)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001431 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1432 if (ret) {
1433 perror("qemu_madvise");
1434 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1435 "but dump_guest_core=off specified\n");
1436 }
1437 }
1438}
1439
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001440const char *qemu_ram_get_idstr(RAMBlock *rb)
1441{
1442 return rb->idstr;
1443}
1444
Mike Dayae3a7042013-09-05 14:41:35 -04001445/* Called with iothread lock held. */
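/* Build @new_block's identifier from the owning device's qdev path (if any)
 * and @name, and abort if another block already uses that identifier.
 */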
Gongleifa53a0e2016-05-10 10:04:59 +08001446void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
Hu Tao20cfe882014-04-02 15:13:26 +08001447{
Gongleifa53a0e2016-05-10 10:04:59 +08001448 RAMBlock *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001449
Avi Kivityc5705a72011-12-20 15:59:12 +02001450 assert(new_block);
1451 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001452
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001453 if (dev) {
1454 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001455 if (id) {
1456 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001457 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001458 }
1459 }
1460 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1461
Gongleiab0a9952016-05-10 10:05:00 +08001462 rcu_read_lock();
Mike Day0dc3f442013-09-05 14:41:35 -04001463 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Gongleifa53a0e2016-05-10 10:04:59 +08001464 if (block != new_block &&
1465 !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001466 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1467 new_block->idstr);
1468 abort();
1469 }
1470 }
Mike Day0dc3f442013-09-05 14:41:35 -04001471 rcu_read_unlock();
Avi Kivityc5705a72011-12-20 15:59:12 +02001472}
1473
Mike Dayae3a7042013-09-05 14:41:35 -04001474/* Called with iothread lock held. */
Gongleifa53a0e2016-05-10 10:04:59 +08001475void qemu_ram_unset_idstr(RAMBlock *block)
Hu Tao20cfe882014-04-02 15:13:26 +08001476{
Mike Dayae3a7042013-09-05 14:41:35 -04001477 /* FIXME: arch_init.c assumes that this is not called throughout
1478 * migration. Ignore the problem since hot-unplug during migration
1479 * does not work anyway.
1480 */
Hu Tao20cfe882014-04-02 15:13:26 +08001481 if (block) {
1482 memset(block->idstr, 0, sizeof(block->idstr));
1483 }
1484}
1485
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001486static int memory_try_enable_merging(void *addr, size_t len)
1487{
Marcel Apfelbaum75cc7f02015-02-04 17:43:55 +02001488 if (!machine_mem_merge(current_machine)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001489 /* disabled by the user */
1490 return 0;
1491 }
1492
1493 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1494}
1495
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001496/* Only legal before the guest might have detected the memory size: e.g. on
1497 * incoming migration, or right after reset.
1498 *
1499 * As the memory core doesn't know how memory is accessed, it is up to
1500 * the resize callback to update device state and/or add assertions to detect
1501 * misuse, if necessary.
1502 */
Gongleifa53a0e2016-05-10 10:04:59 +08001503int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001504{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001505 assert(block);
1506
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001507 newsize = HOST_PAGE_ALIGN(newsize);
Michael S. Tsirkin129ddaf2015-02-17 10:15:30 +01001508
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001509 if (block->used_length == newsize) {
1510 return 0;
1511 }
1512
1513 if (!(block->flags & RAM_RESIZEABLE)) {
1514 error_setg_errno(errp, EINVAL,
1515 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1516 " in != 0x" RAM_ADDR_FMT, block->idstr,
1517 newsize, block->used_length);
1518 return -EINVAL;
1519 }
1520
1521 if (block->max_length < newsize) {
1522 error_setg_errno(errp, EINVAL,
1523 "Length too large: %s: 0x" RAM_ADDR_FMT
1524 " > 0x" RAM_ADDR_FMT, block->idstr,
1525 newsize, block->max_length);
1526 return -EINVAL;
1527 }
1528
1529 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1530 block->used_length = newsize;
Paolo Bonzini58d27072015-03-23 11:56:01 +01001531 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1532 DIRTY_CLIENTS_ALL);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001533 memory_region_set_size(block->mr, newsize);
1534 if (block->resized) {
1535 block->resized(block->idstr, newsize, block->host);
1536 }
1537 return 0;
1538}
1539
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001540/* Called with ram_list.mutex held */
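/* Grow each dirty-memory bitmap to cover new_ram_size pages.  The existing
 * block pointers are copied into a freshly allocated DirtyMemoryBlocks
 * array that is published with RCU, so concurrent readers keep using the
 * old array until it is reclaimed.
 */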
1541static void dirty_memory_extend(ram_addr_t old_ram_size,
1542 ram_addr_t new_ram_size)
1543{
1544 ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
1545 DIRTY_MEMORY_BLOCK_SIZE);
1546 ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
1547 DIRTY_MEMORY_BLOCK_SIZE);
1548 int i;
1549
1550 /* Only need to extend if block count increased */
1551 if (new_num_blocks <= old_num_blocks) {
1552 return;
1553 }
1554
1555 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1556 DirtyMemoryBlocks *old_blocks;
1557 DirtyMemoryBlocks *new_blocks;
1558 int j;
1559
1560 old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
1561 new_blocks = g_malloc(sizeof(*new_blocks) +
1562 sizeof(new_blocks->blocks[0]) * new_num_blocks);
1563
1564 if (old_num_blocks) {
1565 memcpy(new_blocks->blocks, old_blocks->blocks,
1566 old_num_blocks * sizeof(old_blocks->blocks[0]));
1567 }
1568
1569 for (j = old_num_blocks; j < new_num_blocks; j++) {
1570 new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
1571 }
1572
1573 atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
1574
1575 if (old_blocks) {
1576 g_free_rcu(old_blocks, rcu);
1577 }
1578 }
1579}
1580
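/* Insert @new_block into ram_list: pick an offset for it, allocate host
 * memory if none was supplied (through Xen or the phys_mem_alloc hook),
 * extend the dirty bitmaps, and keep the list sorted from biggest to
 * smallest block.
 */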
Fam Zheng528f46a2016-03-01 14:18:18 +08001581static void ram_block_add(RAMBlock *new_block, Error **errp)
Avi Kivityc5705a72011-12-20 15:59:12 +02001582{
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001583 RAMBlock *block;
Mike Day0d53d9f2015-01-21 13:45:24 +01001584 RAMBlock *last_block = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001585 ram_addr_t old_ram_size, new_ram_size;
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001586 Error *err = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001587
1588 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001589
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001590 qemu_mutex_lock_ramlist();
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001591 new_block->offset = find_ram_offset(new_block->max_length);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001592
1593 if (!new_block->host) {
1594 if (xen_enabled()) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001595 xen_ram_alloc(new_block->offset, new_block->max_length,
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001596 new_block->mr, &err);
1597 if (err) {
1598 error_propagate(errp, err);
1599 qemu_mutex_unlock_ramlist();
Paolo Bonzini39c350e2016-03-09 18:14:01 +01001600 return;
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001601 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001602 } else {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001603 new_block->host = phys_mem_alloc(new_block->max_length,
Igor Mammedova2b257d2014-10-31 16:38:37 +00001604 &new_block->mr->align);
Markus Armbruster39228252013-07-31 15:11:11 +02001605 if (!new_block->host) {
Hu Taoef701d72014-09-09 13:27:54 +08001606 error_setg_errno(errp, errno,
1607 "cannot set up guest memory '%s'",
1608 memory_region_name(new_block->mr));
1609 qemu_mutex_unlock_ramlist();
Paolo Bonzini39c350e2016-03-09 18:14:01 +01001610 return;
Markus Armbruster39228252013-07-31 15:11:11 +02001611 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001612 memory_try_enable_merging(new_block->host, new_block->max_length);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001613 }
1614 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001615
Li Zhijiandd631692015-07-02 20:18:06 +08001616 new_ram_size = MAX(old_ram_size,
1617 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1618 if (new_ram_size > old_ram_size) {
1619 migration_bitmap_extend(old_ram_size, new_ram_size);
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001620 dirty_memory_extend(old_ram_size, new_ram_size);
Li Zhijiandd631692015-07-02 20:18:06 +08001621 }
Mike Day0d53d9f2015-01-21 13:45:24 +01001622 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1623 * QLIST (which has an RCU-friendly variant) does not have insertion at
1624 * tail, so save the last element in last_block.
1625 */
Mike Day0dc3f442013-09-05 14:41:35 -04001626 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Mike Day0d53d9f2015-01-21 13:45:24 +01001627 last_block = block;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001628 if (block->max_length < new_block->max_length) {
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001629 break;
1630 }
1631 }
1632 if (block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001633 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001634 } else if (last_block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001635 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001636 } else { /* list is empty */
Mike Day0dc3f442013-09-05 14:41:35 -04001637 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001638 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001639 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001640
Mike Day0dc3f442013-09-05 14:41:35 -04001641 /* Write list before version */
1642 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001643 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001644 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001645
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001646 cpu_physical_memory_set_dirty_range(new_block->offset,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001647 new_block->used_length,
1648 DIRTY_CLIENTS_ALL);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001649
Paolo Bonzinia904c912015-01-21 16:18:35 +01001650 if (new_block->host) {
1651 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1652 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1653 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1654 if (kvm_enabled()) {
1655 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1656 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001657 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001658}
1659
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001660#ifdef __linux__
Fam Zheng528f46a2016-03-01 14:18:18 +08001661RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
1662 bool share, const char *mem_path,
1663 Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001664{
1665 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001666 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001667
1668 if (xen_enabled()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001669 error_setg(errp, "-mem-path not supported with Xen");
Fam Zheng528f46a2016-03-01 14:18:18 +08001670 return NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001671 }
1672
1673 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1674 /*
1675 * file_ram_alloc() needs to allocate just like
1676 * phys_mem_alloc, but we haven't bothered to provide
1677 * a hook there.
1678 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001679 error_setg(errp,
1680 "-mem-path not supported with this accelerator");
Fam Zheng528f46a2016-03-01 14:18:18 +08001681 return NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001682 }
1683
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001684 size = HOST_PAGE_ALIGN(size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001685 new_block = g_malloc0(sizeof(*new_block));
1686 new_block->mr = mr;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001687 new_block->used_length = size;
1688 new_block->max_length = size;
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001689 new_block->flags = share ? RAM_SHARED : 0;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001690 new_block->host = file_ram_alloc(new_block, size,
1691 mem_path, errp);
1692 if (!new_block->host) {
1693 g_free(new_block);
Fam Zheng528f46a2016-03-01 14:18:18 +08001694 return NULL;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001695 }
1696
Fam Zheng528f46a2016-03-01 14:18:18 +08001697 ram_block_add(new_block, &local_err);
Hu Taoef701d72014-09-09 13:27:54 +08001698 if (local_err) {
1699 g_free(new_block);
1700 error_propagate(errp, local_err);
Fam Zheng528f46a2016-03-01 14:18:18 +08001701 return NULL;
Hu Taoef701d72014-09-09 13:27:54 +08001702 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001703 return new_block;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001704}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001705#endif
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001706
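/* Common back end for the qemu_ram_alloc*() variants below: @host, if
 * non-NULL, is a caller-provided buffer; @resizeable allows the block to
 * grow up to @max_size later via qemu_ram_resize().
 */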
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001707static
Fam Zheng528f46a2016-03-01 14:18:18 +08001708RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1709 void (*resized)(const char*,
1710 uint64_t length,
1711 void *host),
1712 void *host, bool resizeable,
1713 MemoryRegion *mr, Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001714{
1715 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001716 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001717
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001718 size = HOST_PAGE_ALIGN(size);
1719 max_size = HOST_PAGE_ALIGN(max_size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001720 new_block = g_malloc0(sizeof(*new_block));
1721 new_block->mr = mr;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001722 new_block->resized = resized;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001723 new_block->used_length = size;
1724 new_block->max_length = max_size;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001725 assert(max_size >= size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001726 new_block->fd = -1;
1727 new_block->host = host;
1728 if (host) {
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001729 new_block->flags |= RAM_PREALLOC;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001730 }
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001731 if (resizeable) {
1732 new_block->flags |= RAM_RESIZEABLE;
1733 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001734 ram_block_add(new_block, &local_err);
Hu Taoef701d72014-09-09 13:27:54 +08001735 if (local_err) {
1736 g_free(new_block);
1737 error_propagate(errp, local_err);
Fam Zheng528f46a2016-03-01 14:18:18 +08001738 return NULL;
Hu Taoef701d72014-09-09 13:27:54 +08001739 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001740 return new_block;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001741}
1742
Fam Zheng528f46a2016-03-01 14:18:18 +08001743RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001744 MemoryRegion *mr, Error **errp)
1745{
1746 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1747}
1748
Fam Zheng528f46a2016-03-01 14:18:18 +08001749RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
pbrook94a6b542009-04-11 17:15:54 +00001750{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001751 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1752}
1753
Fam Zheng528f46a2016-03-01 14:18:18 +08001754RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001755 void (*resized)(const char*,
1756 uint64_t length,
1757 void *host),
1758 MemoryRegion *mr, Error **errp)
1759{
1760 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
pbrook94a6b542009-04-11 17:15:54 +00001761}
bellarde9a1ab12007-02-08 23:08:38 +00001762
Paolo Bonzini43771532013-09-09 17:58:40 +02001763static void reclaim_ramblock(RAMBlock *block)
1764{
1765 if (block->flags & RAM_PREALLOC) {
1766 ;
1767 } else if (xen_enabled()) {
1768 xen_invalidate_map_cache_entry(block->host);
1769#ifndef _WIN32
1770 } else if (block->fd >= 0) {
Eduardo Habkost2f3a2bb2015-11-06 20:11:21 -02001771 qemu_ram_munmap(block->host, block->max_length);
Paolo Bonzini43771532013-09-09 17:58:40 +02001772 close(block->fd);
1773#endif
1774 } else {
1775 qemu_anon_ram_free(block->host, block->max_length);
1776 }
1777 g_free(block);
1778}
1779
Fam Zhengf1060c52016-03-01 14:18:22 +08001780void qemu_ram_free(RAMBlock *block)
bellarde9a1ab12007-02-08 23:08:38 +00001781{
Marc-André Lureau85bc2a12016-03-29 13:20:51 +02001782 if (!block) {
1783 return;
1784 }
1785
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001786 qemu_mutex_lock_ramlist();
Fam Zhengf1060c52016-03-01 14:18:22 +08001787 QLIST_REMOVE_RCU(block, next);
1788 ram_list.mru_block = NULL;
1789 /* Write list before version */
1790 smp_wmb();
1791 ram_list.version++;
1792 call_rcu(block, reclaim_ramblock, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001793 qemu_mutex_unlock_ramlist();
bellarde9a1ab12007-02-08 23:08:38 +00001794}
1795
Huang Yingcd19cfa2011-03-02 08:56:19 +01001796#ifndef _WIN32
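/* Re-create the host mapping for a range of guest RAM in place, e.g. to
 * replace a page that has become unusable after a hardware memory error.
 * The block's backing (file descriptor or anonymous memory) determines how
 * the range is mapped again.
 */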
1797void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1798{
1799 RAMBlock *block;
1800 ram_addr_t offset;
1801 int flags;
1802 void *area, *vaddr;
1803
Mike Day0dc3f442013-09-05 14:41:35 -04001804 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001805 offset = addr - block->offset;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001806 if (offset < block->max_length) {
Michael S. Tsirkin1240be22014-11-12 11:44:41 +02001807 vaddr = ramblock_ptr(block, offset);
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001808 if (block->flags & RAM_PREALLOC) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001809 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001810 } else if (xen_enabled()) {
1811 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001812 } else {
1813 flags = MAP_FIXED;
Markus Armbruster3435f392013-07-31 15:11:07 +02001814 if (block->fd >= 0) {
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001815 flags |= (block->flags & RAM_SHARED ?
1816 MAP_SHARED : MAP_PRIVATE);
Markus Armbruster3435f392013-07-31 15:11:07 +02001817 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1818 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001819 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001820 /*
1821 * Remap needs to match alloc. Accelerators that
1822 * set phys_mem_alloc never remap. If they did,
1823 * we'd need a remap hook here.
1824 */
1825 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1826
Huang Yingcd19cfa2011-03-02 08:56:19 +01001827 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1828 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1829 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001830 }
1831 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001832 fprintf(stderr, "Could not remap addr: "
1833 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001834 length, addr);
1835 exit(1);
1836 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001837 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001838 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001839 }
Huang Yingcd19cfa2011-03-02 08:56:19 +01001840 }
1841 }
1842}
1843#endif /* !_WIN32 */
1844
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001845int qemu_get_ram_fd(ram_addr_t addr)
1846{
Mike Dayae3a7042013-09-05 14:41:35 -04001847 RAMBlock *block;
1848 int fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001849
Mike Day0dc3f442013-09-05 14:41:35 -04001850 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001851 block = qemu_get_ram_block(addr);
1852 fd = block->fd;
Mike Day0dc3f442013-09-05 14:41:35 -04001853 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001854 return fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001855}
1856
Tetsuya Mukawa56a571d2015-12-21 12:47:34 +09001857void qemu_set_ram_fd(ram_addr_t addr, int fd)
1858{
1859 RAMBlock *block;
1860
1861 rcu_read_lock();
1862 block = qemu_get_ram_block(addr);
1863 block->fd = fd;
1864 rcu_read_unlock();
1865}
1866
Damjan Marion3fd74b82014-06-26 23:01:32 +02001867void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1868{
Mike Dayae3a7042013-09-05 14:41:35 -04001869 RAMBlock *block;
1870 void *ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001871
Mike Day0dc3f442013-09-05 14:41:35 -04001872 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001873 block = qemu_get_ram_block(addr);
1874 ptr = ramblock_ptr(block, 0);
Mike Day0dc3f442013-09-05 14:41:35 -04001875 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001876 return ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001877}
1878
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001879/* Return a host pointer to ram allocated with qemu_ram_alloc.
Mike Dayae3a7042013-09-05 14:41:35 -04001880 * This should not be used for general purpose DMA. Use address_space_map
1881 * or address_space_rw instead. For local memory (e.g. video ram) that the
1882 * device owns, use memory_region_get_ram_ptr.
Mike Day0dc3f442013-09-05 14:41:35 -04001883 *
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001884 * Called within RCU critical section.
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001885 */
Gonglei3655cb92016-02-20 10:35:20 +08001886void *qemu_get_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001887{
Gonglei3655cb92016-02-20 10:35:20 +08001888 RAMBlock *block = ram_block;
1889
1890 if (block == NULL) {
1891 block = qemu_get_ram_block(addr);
1892 }
Mike Dayae3a7042013-09-05 14:41:35 -04001893
1894 if (xen_enabled() && block->host == NULL) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001895 /* We need to check if the requested address is in the RAM
1896 * because we don't want to map the entire memory in QEMU.
1897 * In that case just map until the end of the page.
1898 */
1899 if (block->offset == 0) {
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001900 return xen_map_cache(addr, 0, 0);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001901 }
Mike Dayae3a7042013-09-05 14:41:35 -04001902
1903 block->host = xen_map_cache(block->offset, block->max_length, 1);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001904 }
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001905 return ramblock_ptr(block, addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00001906}
1907
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001908/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
Mike Dayae3a7042013-09-05 14:41:35 -04001909 * but takes a size argument.
Mike Day0dc3f442013-09-05 14:41:35 -04001910 *
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001911 * Called within RCU critical section.
Mike Dayae3a7042013-09-05 14:41:35 -04001912 */
Gonglei3655cb92016-02-20 10:35:20 +08001913static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
1914 hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001915{
Gonglei3655cb92016-02-20 10:35:20 +08001916 RAMBlock *block = ram_block;
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001917 ram_addr_t offset_inside_block;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001918 if (*size == 0) {
1919 return NULL;
1920 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001921
Gonglei3655cb92016-02-20 10:35:20 +08001922 if (block == NULL) {
1923 block = qemu_get_ram_block(addr);
1924 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001925 offset_inside_block = addr - block->offset;
1926 *size = MIN(*size, block->max_length - offset_inside_block);
1927
1928 if (xen_enabled() && block->host == NULL) {
1929 /* We need to check if the requested address is in the RAM
1930 * because we don't want to map the entire memory in QEMU.
1931 * In that case just map the requested area.
1932 */
1933 if (block->offset == 0) {
1934 return xen_map_cache(addr, *size, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001935 }
1936
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001937 block->host = xen_map_cache(block->offset, block->max_length, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001938 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001939
1940 return ramblock_ptr(block, offset_inside_block);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001941}
1942
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001943/*
1944 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1945 * in that RAMBlock.
1946 *
1947 * ptr: Host pointer to look up
1948 * round_offset: If true round the result offset down to a page boundary
1949 * *ram_addr: set to result ram_addr
1950 * *offset: set to result offset within the RAMBlock
1951 *
1952 * Returns: RAMBlock (or NULL if not found)
Mike Dayae3a7042013-09-05 14:41:35 -04001953 *
1954 * By the time this function returns, the returned pointer is not protected
1955 * by RCU anymore. If the caller is not within an RCU critical section and
1956 * does not hold the iothread lock, it must have other means of protecting the
1957 * pointer, such as a reference to the region that includes the incoming
1958 * ram_addr_t.
1959 */
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001960RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
1961 ram_addr_t *ram_addr,
1962 ram_addr_t *offset)
pbrook5579c7f2009-04-11 14:47:08 +00001963{
pbrook94a6b542009-04-11 17:15:54 +00001964 RAMBlock *block;
1965 uint8_t *host = ptr;
1966
Jan Kiszka868bb332011-06-21 22:59:09 +02001967 if (xen_enabled()) {
Mike Day0dc3f442013-09-05 14:41:35 -04001968 rcu_read_lock();
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001969 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001970 block = qemu_get_ram_block(*ram_addr);
1971 if (block) {
1972 *offset = (host - block->host);
1973 }
Mike Day0dc3f442013-09-05 14:41:35 -04001974 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001975 return block;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001976 }
1977
Mike Day0dc3f442013-09-05 14:41:35 -04001978 rcu_read_lock();
1979 block = atomic_rcu_read(&ram_list.mru_block);
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001980 if (block && block->host && host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001981 goto found;
1982 }
1983
Mike Day0dc3f442013-09-05 14:41:35 -04001984 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001985 /* This case happens when the block is not mapped. */
1986 if (block->host == NULL) {
1987 continue;
1988 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001989 if (host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001990 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001991 }
pbrook94a6b542009-04-11 17:15:54 +00001992 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001993
Mike Day0dc3f442013-09-05 14:41:35 -04001994 rcu_read_unlock();
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001995 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001996
1997found:
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001998 *offset = (host - block->host);
1999 if (round_offset) {
2000 *offset &= TARGET_PAGE_MASK;
2001 }
2002 *ram_addr = block->offset + *offset;
Mike Day0dc3f442013-09-05 14:41:35 -04002003 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00002004 return block;
2005}
2006
Dr. David Alan Gilberte3dd7492015-11-05 18:10:33 +00002007/*
2008 * Finds the named RAMBlock
2009 *
2010 * name: The name of RAMBlock to find
2011 *
2012 * Returns: RAMBlock (or NULL if not found)
2013 */
2014RAMBlock *qemu_ram_block_by_name(const char *name)
2015{
2016 RAMBlock *block;
2017
2018 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
2019 if (!strcmp(name, block->idstr)) {
2020 return block;
2021 }
2022 }
2023
2024 return NULL;
2025}
2026
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00002027/* Some of the softmmu routines need to translate from a host pointer
2028 (typically a TLB entry) back to a ram offset. */
2029MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
2030{
2031 RAMBlock *block;
2032 ram_addr_t offset; /* Not used */
2033
2034 block = qemu_ram_block_from_host(ptr, false, ram_addr, &offset);
2035
2036 if (!block) {
2037 return NULL;
2038 }
2039
2040 return block->mr;
Marcelo Tosattie8902612010-10-11 15:31:19 -03002041}
Alex Williamsonf471a172010-06-11 11:11:42 -06002042
Paolo Bonzini49b24af2015-12-16 10:30:47 +01002043/* Called within RCU critical section. */
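/* Write handler for the "notdirty" region: writes to pages that may still
 * contain translated code are routed here so the affected TBs can be
 * invalidated before the store is performed; afterwards the dirty bits are
 * set and, once the page holds no more code, the slow path is disarmed.
 */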
Avi Kivitya8170e52012-10-23 12:30:10 +02002044static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002045 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00002046{
Juan Quintela52159192013-10-08 12:44:04 +02002047 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002048 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00002049 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002050 switch (size) {
2051 case 1:
Gonglei3655cb92016-02-20 10:35:20 +08002052 stb_p(qemu_get_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002053 break;
2054 case 2:
Gonglei3655cb92016-02-20 10:35:20 +08002055 stw_p(qemu_get_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002056 break;
2057 case 4:
Gonglei3655cb92016-02-20 10:35:20 +08002058 stl_p(qemu_get_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002059 break;
2060 default:
2061 abort();
2062 }
Paolo Bonzini58d27072015-03-23 11:56:01 +01002063 /* Set both VGA and migration bits for simplicity and to remove
2064 * the notdirty callback faster.
2065 */
2066 cpu_physical_memory_set_dirty_range(ram_addr, size,
2067 DIRTY_CLIENTS_NOCODE);
bellardf23db162005-08-21 19:12:28 +00002068 /* we remove the notdirty callback only if the code has been
2069 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02002070 if (!cpu_physical_memory_is_clean(ram_addr)) {
Peter Crosthwaitebcae01e2015-09-10 22:39:42 -07002071 tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
Andreas Färber4917cf42013-05-27 05:17:50 +02002072 }
bellard1ccde1c2004-02-06 19:46:14 +00002073}
2074
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002075static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
2076 unsigned size, bool is_write)
2077{
2078 return is_write;
2079}
2080
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002081static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002082 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002083 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002084 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00002085};
2086
pbrook0f459d12008-06-09 00:20:13 +00002087/* Generate a debug exception if a watchpoint has been hit. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002088static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
pbrook0f459d12008-06-09 00:20:13 +00002089{
Andreas Färber93afead2013-08-26 03:41:01 +02002090 CPUState *cpu = current_cpu;
Sergey Fedorov568496c2016-02-11 11:17:32 +00002091 CPUClass *cc = CPU_GET_CLASS(cpu);
Andreas Färber93afead2013-08-26 03:41:01 +02002092 CPUArchState *env = cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00002093 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00002094 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00002095 CPUWatchpoint *wp;
Emilio G. Cota89fee742016-04-07 13:19:22 -04002096 uint32_t cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00002097
Andreas Färberff4700b2013-08-26 18:23:18 +02002098 if (cpu->watchpoint_hit) {
aliguori06d55cc2008-11-18 20:24:06 +00002099 /* We re-entered the check after replacing the TB. Now raise
2100 * the debug interrupt so that it will trigger after the
2101 * current instruction. */
Andreas Färber93afead2013-08-26 03:41:01 +02002102 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00002103 return;
2104 }
Andreas Färber93afead2013-08-26 03:41:01 +02002105 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Andreas Färberff4700b2013-08-26 18:23:18 +02002106 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Peter Maydell05068c02014-09-12 14:06:48 +01002107 if (cpu_watchpoint_address_matches(wp, vaddr, len)
2108 && (wp->flags & flags)) {
Peter Maydell08225672014-09-12 14:06:48 +01002109 if (flags == BP_MEM_READ) {
2110 wp->flags |= BP_WATCHPOINT_HIT_READ;
2111 } else {
2112 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
2113 }
2114 wp->hitaddr = vaddr;
Peter Maydell66b9b432015-04-26 16:49:24 +01002115 wp->hitattrs = attrs;
Andreas Färberff4700b2013-08-26 18:23:18 +02002116 if (!cpu->watchpoint_hit) {
Sergey Fedorov568496c2016-02-11 11:17:32 +00002117 if (wp->flags & BP_CPU &&
2118 !cc->debug_check_watchpoint(cpu, wp)) {
2119 wp->flags &= ~BP_WATCHPOINT_HIT;
2120 continue;
2121 }
Andreas Färberff4700b2013-08-26 18:23:18 +02002122 cpu->watchpoint_hit = wp;
Andreas Färber239c51a2013-09-01 17:12:23 +02002123 tb_check_watchpoint(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002124 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
Andreas Färber27103422013-08-26 08:31:06 +02002125 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +02002126 cpu_loop_exit(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002127 } else {
2128 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
Andreas Färber648f0342013-09-01 17:43:17 +02002129 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
Andreas Färber0ea8cb82013-09-03 02:12:23 +02002130 cpu_resume_from_signal(cpu, NULL);
aliguori6e140f22008-11-18 20:37:55 +00002131 }
aliguori06d55cc2008-11-18 20:24:06 +00002132 }
aliguori6e140f22008-11-18 20:37:55 +00002133 } else {
2134 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00002135 }
2136 }
2137}
2138
pbrook6658ffb2007-03-16 23:58:11 +00002139/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2140 so these check for a hit then pass through to the normal out-of-line
2141 phys routines. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002142static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
2143 unsigned size, MemTxAttrs attrs)
pbrook6658ffb2007-03-16 23:58:11 +00002144{
Peter Maydell66b9b432015-04-26 16:49:24 +01002145 MemTxResult res;
2146 uint64_t data;
Peter Maydell79ed0412016-01-21 14:15:06 +00002147 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2148 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
pbrook6658ffb2007-03-16 23:58:11 +00002149
Peter Maydell66b9b432015-04-26 16:49:24 +01002150 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
Avi Kivity1ec9b902012-01-02 12:47:48 +02002151 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04002152 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002153 data = address_space_ldub(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002154 break;
2155 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002156 data = address_space_lduw(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002157 break;
2158 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002159 data = address_space_ldl(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002160 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02002161 default: abort();
2162 }
Peter Maydell66b9b432015-04-26 16:49:24 +01002163 *pdata = data;
2164 return res;
2165}
2166
2167static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
2168 uint64_t val, unsigned size,
2169 MemTxAttrs attrs)
2170{
2171 MemTxResult res;
Peter Maydell79ed0412016-01-21 14:15:06 +00002172 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2173 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
Peter Maydell66b9b432015-04-26 16:49:24 +01002174
2175 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
2176 switch (size) {
2177 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002178 address_space_stb(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002179 break;
2180 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002181 address_space_stw(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002182 break;
2183 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002184 address_space_stl(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002185 break;
2186 default: abort();
2187 }
2188 return res;
pbrook6658ffb2007-03-16 23:58:11 +00002189}
2190
Avi Kivity1ec9b902012-01-02 12:47:48 +02002191static const MemoryRegionOps watch_mem_ops = {
Peter Maydell66b9b432015-04-26 16:49:24 +01002192 .read_with_attrs = watch_mem_read,
2193 .write_with_attrs = watch_mem_write,
Avi Kivity1ec9b902012-01-02 12:47:48 +02002194 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00002195};
pbrook6658ffb2007-03-16 23:58:11 +00002196
Peter Maydellf25a49e2015-04-26 16:49:24 +01002197static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2198 unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002199{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002200 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002201 uint8_t buf[8];
Peter Maydell5c9eb022015-04-26 16:49:24 +01002202 MemTxResult res;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002203
blueswir1db7b5422007-05-26 17:36:03 +00002204#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002205 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002206 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00002207#endif
Peter Maydell5c9eb022015-04-26 16:49:24 +01002208 res = address_space_read(subpage->as, addr + subpage->base,
2209 attrs, buf, len);
2210 if (res) {
2211 return res;
Peter Maydellf25a49e2015-04-26 16:49:24 +01002212 }
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002213 switch (len) {
2214 case 1:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002215 *data = ldub_p(buf);
2216 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002217 case 2:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002218 *data = lduw_p(buf);
2219 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002220 case 4:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002221 *data = ldl_p(buf);
2222 return MEMTX_OK;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002223 case 8:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002224 *data = ldq_p(buf);
2225 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002226 default:
2227 abort();
2228 }
blueswir1db7b5422007-05-26 17:36:03 +00002229}
2230
Peter Maydellf25a49e2015-04-26 16:49:24 +01002231static MemTxResult subpage_write(void *opaque, hwaddr addr,
2232 uint64_t value, unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002233{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002234 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002235 uint8_t buf[8];
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002236
blueswir1db7b5422007-05-26 17:36:03 +00002237#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002238 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002239 " value %"PRIx64"\n",
2240 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00002241#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002242 switch (len) {
2243 case 1:
2244 stb_p(buf, value);
2245 break;
2246 case 2:
2247 stw_p(buf, value);
2248 break;
2249 case 4:
2250 stl_p(buf, value);
2251 break;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002252 case 8:
2253 stq_p(buf, value);
2254 break;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002255 default:
2256 abort();
2257 }
Peter Maydell5c9eb022015-04-26 16:49:24 +01002258 return address_space_write(subpage->as, addr + subpage->base,
2259 attrs, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00002260}
2261
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002262static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08002263 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002264{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002265 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002266#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002267 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002268 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002269#endif
2270
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002271 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08002272 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002273}
2274
Avi Kivity70c68e42012-01-02 12:32:48 +02002275static const MemoryRegionOps subpage_ops = {
Peter Maydellf25a49e2015-04-26 16:49:24 +01002276 .read_with_attrs = subpage_read,
2277 .write_with_attrs = subpage_write,
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002278 .impl.min_access_size = 1,
2279 .impl.max_access_size = 8,
2280 .valid.min_access_size = 1,
2281 .valid.max_access_size = 8,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002282 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02002283 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00002284};
2285
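/* Point the sub_section[] entries covering [start, end] (offsets within a
 * single target page) at @section.
 */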
Anthony Liguoric227f092009-10-01 16:12:16 -05002286static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002287 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00002288{
2289 int idx, eidx;
2290
2291 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2292 return -1;
2293 idx = SUBPAGE_IDX(start);
2294 eidx = SUBPAGE_IDX(end);
2295#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002296 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2297 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00002298#endif
blueswir1db7b5422007-05-26 17:36:03 +00002299 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02002300 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00002301 }
2302
2303 return 0;
2304}
2305
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002306static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00002307{
Anthony Liguoric227f092009-10-01 16:12:16 -05002308 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00002309
Anthony Liguori7267c092011-08-20 22:09:37 -05002310 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00002311
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002312 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00002313 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002314 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002315 NULL, TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02002316 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00002317#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002318 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2319 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00002320#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02002321 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00002322
2323 return mmio;
2324}
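/* Sketch of how the dispatch-building code uses these helpers (section index
 * and sizes hypothetical): a device section covering only the first 0x100
 * bytes of a page amounts to
 *
 *     subpage_t *sp = subpage_init(as, page_base);
 *     subpage_register(sp, 0x000, 0x0ff, device_section_idx);
 *     // bytes 0x100 .. TARGET_PAGE_SIZE-1 keep PHYS_SECTION_UNASSIGNED
 */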
2325
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002326static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2327 MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02002328{
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002329 assert(as);
Avi Kivity5312bd82012-02-12 18:32:55 +02002330 MemoryRegionSection section = {
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002331 .address_space = as,
Avi Kivity5312bd82012-02-12 18:32:55 +02002332 .mr = mr,
2333 .offset_within_address_space = 0,
2334 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002335 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02002336 };
2337
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002338 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02002339}
2340
Peter Maydella54c87b2016-01-21 14:15:05 +00002341MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
Avi Kivityaa102232012-03-08 17:06:55 +02002342{
Peter Maydella54c87b2016-01-21 14:15:05 +00002343 int asidx = cpu_asidx_from_attrs(cpu, attrs);
2344 CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
Peter Maydell32857f42015-10-01 15:29:50 +01002345 AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002346 MemoryRegionSection *sections = d->map.sections;
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002347
2348 return sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02002349}
2350
Avi Kivitye9179ce2009-06-14 11:38:52 +03002351static void io_mem_init(void)
2352{
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002353 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002354 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002355 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002356 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002357 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002358 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002359 NULL, UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03002360}
2361
Avi Kivityac1970f2012-10-03 16:22:53 +02002362static void mem_begin(MemoryListener *listener)
2363{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002364 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002365 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2366 uint16_t n;
2367
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002368 n = dummy_section(&d->map, as, &io_mem_unassigned);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002369 assert(n == PHYS_SECTION_UNASSIGNED);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002370 n = dummy_section(&d->map, as, &io_mem_notdirty);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002371 assert(n == PHYS_SECTION_NOTDIRTY);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002372 n = dummy_section(&d->map, as, &io_mem_rom);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002373 assert(n == PHYS_SECTION_ROM);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002374 n = dummy_section(&d->map, as, &io_mem_watch);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002375 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02002376
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02002377 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02002378 d->as = as;
2379 as->next_dispatch = d;
2380}
2381
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002382static void address_space_dispatch_free(AddressSpaceDispatch *d)
2383{
2384 phys_sections_free(&d->map);
2385 g_free(d);
2386}
2387
Paolo Bonzini00752702013-05-29 12:13:54 +02002388static void mem_commit(MemoryListener *listener)
2389{
2390 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02002391 AddressSpaceDispatch *cur = as->dispatch;
2392 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02002393
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002394 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02002395
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002396 atomic_rcu_set(&as->dispatch, next);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002397 if (cur) {
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002398 call_rcu(cur, address_space_dispatch_free, rcu);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002399 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02002400}
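/* Note on the update scheme: mem_begin()/mem_commit() build a fresh
 * AddressSpaceDispatch and publish it with atomic_rcu_set(), while readers
 * walk as->dispatch inside an RCU critical section.  The old table is
 * therefore reclaimed through call_rcu() rather than an immediate g_free(),
 * only after all in-flight readers have finished.
 */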
2401
Avi Kivity1d711482012-10-02 18:54:45 +02002402static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02002403{
Peter Maydell32857f42015-10-01 15:29:50 +01002404 CPUAddressSpace *cpuas;
2405 AddressSpaceDispatch *d;
Avi Kivity117712c2012-02-12 21:23:17 +02002406
2407 /* since each CPU stores ram addresses in its TLB cache, we must
2408 reset the modified entries */
Peter Maydell32857f42015-10-01 15:29:50 +01002409 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
2410 cpu_reloading_memory_map();
2411 /* The CPU and TLB are protected by the iothread lock.
2412 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2413 * may have split the RCU critical section.
2414 */
2415 d = atomic_rcu_read(&cpuas->as->dispatch);
2416 cpuas->memory_dispatch = d;
2417 tlb_flush(cpuas->cpu, 1);
Avi Kivity50c1e142012-02-08 21:36:02 +02002418}
2419
Avi Kivityac1970f2012-10-03 16:22:53 +02002420void address_space_init_dispatch(AddressSpace *as)
2421{
Paolo Bonzini00752702013-05-29 12:13:54 +02002422 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002423 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002424 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02002425 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02002426 .region_add = mem_add,
2427 .region_nop = mem_add,
2428 .priority = 0,
2429 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002430 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02002431}
2432
Paolo Bonzini6e48e8f2015-02-10 10:25:44 -07002433void address_space_unregister(AddressSpace *as)
2434{
2435 memory_listener_unregister(&as->dispatch_listener);
2436}
2437
Avi Kivity83f3c252012-10-07 12:59:55 +02002438void address_space_destroy_dispatch(AddressSpace *as)
2439{
2440 AddressSpaceDispatch *d = as->dispatch;
2441
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002442 atomic_rcu_set(&as->dispatch, NULL);
2443 if (d) {
2444 call_rcu(d, address_space_dispatch_free, rcu);
2445 }
Avi Kivity83f3c252012-10-07 12:59:55 +02002446}
2447
Avi Kivity62152b82011-07-26 14:26:14 +03002448static void memory_map_init(void)
2449{
Anthony Liguori7267c092011-08-20 22:09:37 -05002450 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01002451
Paolo Bonzini57271d62013-11-07 17:14:37 +01002452 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002453 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03002454
Anthony Liguori7267c092011-08-20 22:09:37 -05002455 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02002456 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2457 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002458 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity62152b82011-07-26 14:26:14 +03002459}
2460
2461MemoryRegion *get_system_memory(void)
2462{
2463 return system_memory;
2464}
2465
Avi Kivity309cb472011-08-08 16:09:03 +03002466MemoryRegion *get_system_io(void)
2467{
2468 return system_io;
2469}
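/* Sketch of typical board code built on these accessors (region name, size
 * and base address are hypothetical):
 *
 *     MemoryRegion *ram = g_new(MemoryRegion, 1);
 *     memory_region_init_ram(ram, NULL, "board.ram", 128 * 1024 * 1024,
 *                            &error_fatal);
 *     memory_region_add_subregion(get_system_memory(), 0x40000000, ram);
 */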
2470
pbrooke2eef172008-06-08 01:09:01 +00002471#endif /* !defined(CONFIG_USER_ONLY) */
2472
bellard13eb76e2004-01-24 15:23:36 +00002473/* physical memory access (slow version, mainly for debug) */
2474#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02002475int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00002476 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002477{
2478 int l, flags;
2479 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00002480 void * p;
bellard13eb76e2004-01-24 15:23:36 +00002481
2482 while (len > 0) {
2483 page = addr & TARGET_PAGE_MASK;
2484 l = (page + TARGET_PAGE_SIZE) - addr;
2485 if (l > len)
2486 l = len;
2487 flags = page_get_flags(page);
2488 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00002489 return -1;
bellard13eb76e2004-01-24 15:23:36 +00002490 if (is_write) {
2491 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00002492 return -1;
bellard579a97f2007-11-11 14:26:47 +00002493 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002494 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00002495 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002496 memcpy(p, buf, l);
2497 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00002498 } else {
2499 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00002500 return -1;
bellard579a97f2007-11-11 14:26:47 +00002501 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002502 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00002503 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002504 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00002505 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00002506 }
2507 len -= l;
2508 buf += l;
2509 addr += l;
2510 }
Paul Brooka68fe892010-03-01 00:08:59 +00002511 return 0;
bellard13eb76e2004-01-24 15:23:36 +00002512}
bellard8df1cd02005-01-28 22:37:22 +00002513
bellard13eb76e2004-01-24 15:23:36 +00002514#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002515
Paolo Bonzini845b6212015-03-23 11:45:53 +01002516static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002517 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002518{
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002519 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2520 /* No early return if dirty_log_mask is or becomes 0, because
2521 * cpu_physical_memory_set_dirty_range will still call
2522 * xen_modified_memory.
2523 */
2524 if (dirty_log_mask) {
2525 dirty_log_mask =
2526 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002527 }
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002528 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2529 tb_invalidate_phys_range(addr, addr + length);
2530 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2531 }
2532 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002533}
2534
Richard Henderson23326162013-07-08 14:55:59 -07002535static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02002536{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02002537 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07002538
2539 /* Regions are assumed to support 1-4 byte accesses unless
2540 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07002541 if (access_size_max == 0) {
2542 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002543 }
Richard Henderson23326162013-07-08 14:55:59 -07002544
2545 /* Bound the maximum access by the alignment of the address. */
2546 if (!mr->ops->impl.unaligned) {
2547 unsigned align_size_max = addr & -addr;
2548 if (align_size_max != 0 && align_size_max < access_size_max) {
2549 access_size_max = align_size_max;
2550 }
2551 }
2552
2553 /* Don't attempt accesses larger than the maximum. */
2554 if (l > access_size_max) {
2555 l = access_size_max;
2556 }
Peter Maydell6554f5c2015-07-24 13:33:10 +01002557 l = pow2floor(l);
Richard Henderson23326162013-07-08 14:55:59 -07002558
2559 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002560}
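/* Worked example: for an MMIO region with valid.max_access_size == 4 and no
 * unaligned support, a 4-byte access at address 0x1006 is clamped by the
 * alignment term (0x1006 & -0x1006 == 2), so this returns 2 and the caller
 * splits the access into two 2-byte dispatches.
 */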
2561
Jan Kiszka4840f102015-06-18 18:47:22 +02002562static bool prepare_mmio_access(MemoryRegion *mr)
Paolo Bonzini125b3802015-06-18 18:47:21 +02002563{
Jan Kiszka4840f102015-06-18 18:47:22 +02002564 bool unlocked = !qemu_mutex_iothread_locked();
2565 bool release_lock = false;
2566
2567 if (unlocked && mr->global_locking) {
2568 qemu_mutex_lock_iothread();
2569 unlocked = false;
2570 release_lock = true;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002571 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002572 if (mr->flush_coalesced_mmio) {
2573 if (unlocked) {
2574 qemu_mutex_lock_iothread();
2575 }
2576 qemu_flush_coalesced_mmio_buffer();
2577 if (unlocked) {
2578 qemu_mutex_unlock_iothread();
2579 }
2580 }
2581
2582 return release_lock;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002583}
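/* Callers accumulate the return value and drop the lock themselves, e.g.
 *
 *     release_lock |= prepare_mmio_access(mr);
 *     ... dispatch to mr ...
 *     if (release_lock) {
 *         qemu_mutex_unlock_iothread();
 *     }
 *
 * so a device that does its own locking (mr->global_locking == false)
 * normally avoids the big QEMU lock here.
 */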
2584
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002585/* Called within RCU critical section. */
2586static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
2587 MemTxAttrs attrs,
2588 const uint8_t *buf,
2589 int len, hwaddr addr1,
2590 hwaddr l, MemoryRegion *mr)
bellard13eb76e2004-01-24 15:23:36 +00002591{
bellard13eb76e2004-01-24 15:23:36 +00002592 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002593 uint64_t val;
Peter Maydell3b643492015-04-26 16:49:23 +01002594 MemTxResult result = MEMTX_OK;
Jan Kiszka4840f102015-06-18 18:47:22 +02002595 bool release_lock = false;
ths3b46e622007-09-17 08:09:54 +00002596
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002597 for (;;) {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002598 if (!memory_access_is_direct(mr, true)) {
2599 release_lock |= prepare_mmio_access(mr);
2600 l = memory_access_size(mr, l, addr1);
2601 /* XXX: could force current_cpu to NULL to avoid
2602 potential bugs */
2603 switch (l) {
2604 case 8:
2605 /* 64 bit write access */
2606 val = ldq_p(buf);
2607 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2608 attrs);
2609 break;
2610 case 4:
2611 /* 32 bit write access */
2612 val = ldl_p(buf);
2613 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2614 attrs);
2615 break;
2616 case 2:
2617 /* 16 bit write access */
2618 val = lduw_p(buf);
2619 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2620 attrs);
2621 break;
2622 case 1:
2623 /* 8 bit write access */
2624 val = ldub_p(buf);
2625 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2626 attrs);
2627 break;
2628 default:
2629 abort();
bellard13eb76e2004-01-24 15:23:36 +00002630 }
2631 } else {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002632 addr1 += memory_region_get_ram_addr(mr);
2633 /* RAM case */
Gonglei3655cb92016-02-20 10:35:20 +08002634 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002635 memcpy(ptr, buf, l);
2636 invalidate_and_set_dirty(mr, addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002637 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002638
2639 if (release_lock) {
2640 qemu_mutex_unlock_iothread();
2641 release_lock = false;
2642 }
2643
bellard13eb76e2004-01-24 15:23:36 +00002644 len -= l;
2645 buf += l;
2646 addr += l;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002647
2648 if (!len) {
2649 break;
2650 }
2651
2652 l = len;
2653 mr = address_space_translate(as, addr, &addr1, &l, true);
bellard13eb76e2004-01-24 15:23:36 +00002654 }
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002655
Peter Maydell3b643492015-04-26 16:49:23 +01002656 return result;
bellard13eb76e2004-01-24 15:23:36 +00002657}
bellard8df1cd02005-01-28 22:37:22 +00002658
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002659MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2660 const uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002661{
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002662 hwaddr l;
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002663 hwaddr addr1;
2664 MemoryRegion *mr;
2665 MemTxResult result = MEMTX_OK;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002666
2667 if (len > 0) {
2668 rcu_read_lock();
2669 l = len;
2670 mr = address_space_translate(as, addr, &addr1, &l, true);
2671 result = address_space_write_continue(as, addr, attrs, buf, len,
2672 addr1, l, mr);
2673 rcu_read_unlock();
2674 }
2675
2676 return result;
2677}
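/* Usage sketch (guest address hypothetical): writing a small buffer and
 * checking the transaction result:
 *
 *     uint8_t data[4] = { 0xde, 0xad, 0xbe, 0xef };
 *     MemTxResult r = address_space_write(&address_space_memory, gpa,
 *                                         MEMTXATTRS_UNSPECIFIED, data, 4);
 *     if (r != MEMTX_OK) {
 *         // the access hit unassigned memory or a device reported an error
 *     }
 */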
2678
2679/* Called within RCU critical section. */
2680MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
2681 MemTxAttrs attrs, uint8_t *buf,
2682 int len, hwaddr addr1, hwaddr l,
2683 MemoryRegion *mr)
2684{
2685 uint8_t *ptr;
2686 uint64_t val;
2687 MemTxResult result = MEMTX_OK;
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002688 bool release_lock = false;
2689
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002690 for (;;) {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002691 if (!memory_access_is_direct(mr, false)) {
2692 /* I/O case */
2693 release_lock |= prepare_mmio_access(mr);
2694 l = memory_access_size(mr, l, addr1);
2695 switch (l) {
2696 case 8:
2697 /* 64 bit read access */
2698 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2699 attrs);
2700 stq_p(buf, val);
2701 break;
2702 case 4:
2703 /* 32 bit read access */
2704 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2705 attrs);
2706 stl_p(buf, val);
2707 break;
2708 case 2:
2709 /* 16 bit read access */
2710 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2711 attrs);
2712 stw_p(buf, val);
2713 break;
2714 case 1:
2715 /* 8 bit read access */
2716 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2717 attrs);
2718 stb_p(buf, val);
2719 break;
2720 default:
2721 abort();
2722 }
2723 } else {
2724 /* RAM case */
Fam Zheng8e41fb62016-03-01 14:18:21 +08002725 ptr = qemu_get_ram_ptr(mr->ram_block,
2726 memory_region_get_ram_addr(mr) + addr1);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002727 memcpy(buf, ptr, l);
2728 }
2729
2730 if (release_lock) {
2731 qemu_mutex_unlock_iothread();
2732 release_lock = false;
2733 }
2734
2735 len -= l;
2736 buf += l;
2737 addr += l;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002738
2739 if (!len) {
2740 break;
2741 }
2742
2743 l = len;
2744 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002745 }
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002746
2747 return result;
2748}
2749
Paolo Bonzini3cc8f882015-12-09 10:34:13 +01002750MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
2751 MemTxAttrs attrs, uint8_t *buf, int len)
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002752{
2753 hwaddr l;
2754 hwaddr addr1;
2755 MemoryRegion *mr;
2756 MemTxResult result = MEMTX_OK;
2757
2758 if (len > 0) {
2759 rcu_read_lock();
2760 l = len;
2761 mr = address_space_translate(as, addr, &addr1, &l, false);
2762 result = address_space_read_continue(as, addr, attrs, buf, len,
2763 addr1, l, mr);
2764 rcu_read_unlock();
2765 }
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002766
2767 return result;
Avi Kivityac1970f2012-10-03 16:22:53 +02002768}
2769
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002770MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2771 uint8_t *buf, int len, bool is_write)
2772{
2773 if (is_write) {
2774 return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
2775 } else {
2776 return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
2777 }
2778}
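/* Equivalent read through the generic dispatcher (address hypothetical):
 *
 *     uint32_t word;
 *     address_space_rw(&address_space_memory, gpa, MEMTXATTRS_UNSPECIFIED,
 *                      (uint8_t *)&word, sizeof(word), false);
 */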
Avi Kivityac1970f2012-10-03 16:22:53 +02002779
Avi Kivitya8170e52012-10-23 12:30:10 +02002780void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002781 int len, int is_write)
2782{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002783 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2784 buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002785}
2786
Alexander Graf582b55a2013-12-11 14:17:44 +01002787enum write_rom_type {
2788 WRITE_DATA,
2789 FLUSH_CACHE,
2790};
2791
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002792static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
Alexander Graf582b55a2013-12-11 14:17:44 +01002793 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
bellardd0ecd2a2006-04-23 17:14:48 +00002794{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002795 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002796 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002797 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002798 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002799
Paolo Bonzini41063e12015-03-18 14:21:43 +01002800 rcu_read_lock();
bellardd0ecd2a2006-04-23 17:14:48 +00002801 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002802 l = len;
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002803 mr = address_space_translate(as, addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002804
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002805 if (!(memory_region_is_ram(mr) ||
2806 memory_region_is_romd(mr))) {
Paolo Bonzinib242e0e2015-07-04 00:24:51 +02002807 l = memory_access_size(mr, l, addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002808 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002809 addr1 += memory_region_get_ram_addr(mr);
bellardd0ecd2a2006-04-23 17:14:48 +00002810 /* ROM/RAM case */
Gonglei3655cb92016-02-20 10:35:20 +08002811 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
Alexander Graf582b55a2013-12-11 14:17:44 +01002812 switch (type) {
2813 case WRITE_DATA:
2814 memcpy(ptr, buf, l);
Paolo Bonzini845b6212015-03-23 11:45:53 +01002815 invalidate_and_set_dirty(mr, addr1, l);
Alexander Graf582b55a2013-12-11 14:17:44 +01002816 break;
2817 case FLUSH_CACHE:
2818 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2819 break;
2820 }
bellardd0ecd2a2006-04-23 17:14:48 +00002821 }
2822 len -= l;
2823 buf += l;
2824 addr += l;
2825 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002826 rcu_read_unlock();
bellardd0ecd2a2006-04-23 17:14:48 +00002827}
2828
Alexander Graf582b55a2013-12-11 14:17:44 +01002829/* used for ROM loading: can write in RAM and ROM */
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002830void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
Alexander Graf582b55a2013-12-11 14:17:44 +01002831 const uint8_t *buf, int len)
2832{
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002833 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
Alexander Graf582b55a2013-12-11 14:17:44 +01002834}
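/* Sketch of a firmware loader using this helper (flash base and blob are
 * hypothetical): unlike address_space_write(), the copy succeeds even when
 * the target is a read-only (ROM) region, because it goes straight to the
 * backing RAM block:
 *
 *     cpu_physical_memory_write_rom(&address_space_memory, 0xfffc0000,
 *                                   blob, blob_size);
 */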
2835
2836void cpu_flush_icache_range(hwaddr start, int len)
2837{
2838 /*
2839 * This function should do the same thing as an icache flush that was
2840 * triggered from within the guest. For TCG we are always cache coherent,
2841 * so there is no need to flush anything. For KVM / Xen we need to flush
2842 * the host's instruction cache at least.
2843 */
2844 if (tcg_enabled()) {
2845 return;
2846 }
2847
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002848 cpu_physical_memory_write_rom_internal(&address_space_memory,
2849 start, NULL, len, FLUSH_CACHE);
Alexander Graf582b55a2013-12-11 14:17:44 +01002850}
2851
aliguori6d16c2f2009-01-22 16:59:11 +00002852typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002853 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002854 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002855 hwaddr addr;
2856 hwaddr len;
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002857 bool in_use;
aliguori6d16c2f2009-01-22 16:59:11 +00002858} BounceBuffer;
2859
2860static BounceBuffer bounce;
2861
aliguoriba223c22009-01-22 16:59:16 +00002862typedef struct MapClient {
Fam Zhenge95205e2015-03-16 17:03:37 +08002863 QEMUBH *bh;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002864 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002865} MapClient;
2866
Fam Zheng38e047b2015-03-16 17:03:35 +08002867QemuMutex map_client_list_lock;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002868static QLIST_HEAD(map_client_list, MapClient) map_client_list
2869 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002870
Fam Zhenge95205e2015-03-16 17:03:37 +08002871static void cpu_unregister_map_client_do(MapClient *client)
aliguoriba223c22009-01-22 16:59:16 +00002872{
Blue Swirl72cf2d42009-09-12 07:36:22 +00002873 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002874 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002875}
2876
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002877static void cpu_notify_map_clients_locked(void)
aliguoriba223c22009-01-22 16:59:16 +00002878{
2879 MapClient *client;
2880
Blue Swirl72cf2d42009-09-12 07:36:22 +00002881 while (!QLIST_EMPTY(&map_client_list)) {
2882 client = QLIST_FIRST(&map_client_list);
Fam Zhenge95205e2015-03-16 17:03:37 +08002883 qemu_bh_schedule(client->bh);
2884 cpu_unregister_map_client_do(client);
aliguoriba223c22009-01-22 16:59:16 +00002885 }
2886}
2887
Fam Zhenge95205e2015-03-16 17:03:37 +08002888void cpu_register_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002889{
2890 MapClient *client = g_malloc(sizeof(*client));
2891
Fam Zheng38e047b2015-03-16 17:03:35 +08002892 qemu_mutex_lock(&map_client_list_lock);
Fam Zhenge95205e2015-03-16 17:03:37 +08002893 client->bh = bh;
bellardd0ecd2a2006-04-23 17:14:48 +00002894 QLIST_INSERT_HEAD(&map_client_list, client, link);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002895 if (!atomic_read(&bounce.in_use)) {
2896 cpu_notify_map_clients_locked();
2897 }
Fam Zheng38e047b2015-03-16 17:03:35 +08002898 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002899}
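/* Sketch of a DMA client retrying a failed map (callback and state struct are
 * hypothetical): when address_space_map() returns NULL because the single
 * bounce buffer is busy, the caller can register a bottom half that fires
 * once the buffer is released:
 *
 *     s->bh = qemu_bh_new(retry_dma_cb, s);
 *     cpu_register_map_client(s->bh);
 */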
2900
Fam Zheng38e047b2015-03-16 17:03:35 +08002901void cpu_exec_init_all(void)
2902{
2903 qemu_mutex_init(&ram_list.mutex);
Fam Zheng38e047b2015-03-16 17:03:35 +08002904 io_mem_init();
Paolo Bonzini680a4782015-11-02 09:23:52 +01002905 memory_map_init();
Fam Zheng38e047b2015-03-16 17:03:35 +08002906 qemu_mutex_init(&map_client_list_lock);
2907}
2908
Fam Zhenge95205e2015-03-16 17:03:37 +08002909void cpu_unregister_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002910{
Fam Zhenge95205e2015-03-16 17:03:37 +08002911 MapClient *client;
bellardd0ecd2a2006-04-23 17:14:48 +00002912
Fam Zhenge95205e2015-03-16 17:03:37 +08002913 qemu_mutex_lock(&map_client_list_lock);
2914 QLIST_FOREACH(client, &map_client_list, link) {
2915 if (client->bh == bh) {
2916 cpu_unregister_map_client_do(client);
2917 break;
2918 }
2919 }
2920 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002921}
2922
2923static void cpu_notify_map_clients(void)
2924{
Fam Zheng38e047b2015-03-16 17:03:35 +08002925 qemu_mutex_lock(&map_client_list_lock);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002926 cpu_notify_map_clients_locked();
Fam Zheng38e047b2015-03-16 17:03:35 +08002927 qemu_mutex_unlock(&map_client_list_lock);
aliguori6d16c2f2009-01-22 16:59:11 +00002928}
2929
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002930bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2931{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002932 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002933 hwaddr l, xlat;
2934
Paolo Bonzini41063e12015-03-18 14:21:43 +01002935 rcu_read_lock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002936 while (len > 0) {
2937 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002938 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2939 if (!memory_access_is_direct(mr, is_write)) {
2940 l = memory_access_size(mr, l, addr);
2941 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                rcu_read_unlock(); /* do not leak the RCU read lock on this early-exit path */
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002942                return false;
2943 }
2944 }
2945
2946 len -= l;
2947 addr += l;
2948 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002949 rcu_read_unlock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002950 return true;
2951}
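/* Usage sketch (window taken from a hypothetical guest descriptor): validate
 * a guest-supplied DMA window before acting on it:
 *
 *     if (!address_space_access_valid(&address_space_memory, gpa, len, true)) {
 *         // reject the request instead of writing into a hole
 *     }
 */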
2952
aliguori6d16c2f2009-01-22 16:59:11 +00002953/* Map a physical memory region into a host virtual address.
2954 * May map a subset of the requested range, given by and returned in *plen.
2955 * May return NULL if resources needed to perform the mapping are exhausted.
2956 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002957 * Use cpu_register_map_client() to know when retrying the map operation is
2958 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002959 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002960void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002961 hwaddr addr,
2962 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002963 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002964{
Avi Kivitya8170e52012-10-23 12:30:10 +02002965 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002966 hwaddr done = 0;
2967 hwaddr l, xlat, base;
2968 MemoryRegion *mr, *this_mr;
2969 ram_addr_t raddr;
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01002970 void *ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00002971
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002972 if (len == 0) {
2973 return NULL;
2974 }
aliguori6d16c2f2009-01-22 16:59:11 +00002975
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002976 l = len;
Paolo Bonzini41063e12015-03-18 14:21:43 +01002977 rcu_read_lock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002978 mr = address_space_translate(as, addr, &xlat, &l, is_write);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002979
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002980 if (!memory_access_is_direct(mr, is_write)) {
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002981 if (atomic_xchg(&bounce.in_use, true)) {
Paolo Bonzini41063e12015-03-18 14:21:43 +01002982 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002983 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002984 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002985 /* Avoid unbounded allocations */
2986 l = MIN(l, TARGET_PAGE_SIZE);
2987 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002988 bounce.addr = addr;
2989 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002990
2991 memory_region_ref(mr);
2992 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002993 if (!is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002994 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2995 bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002996 }
aliguori6d16c2f2009-01-22 16:59:11 +00002997
Paolo Bonzini41063e12015-03-18 14:21:43 +01002998 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002999 *plen = l;
3000 return bounce.buffer;
3001 }
3002
3003 base = xlat;
3004 raddr = memory_region_get_ram_addr(mr);
3005
3006 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00003007 len -= l;
3008 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02003009 done += l;
3010 if (len == 0) {
3011 break;
3012 }
3013
3014 l = len;
3015 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
3016 if (this_mr != mr || xlat != base + done) {
3017 break;
3018 }
aliguori6d16c2f2009-01-22 16:59:11 +00003019 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02003020
Paolo Bonzinid3e71552013-06-28 17:33:29 +02003021 memory_region_ref(mr);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02003022 *plen = done;
Gonglei3655cb92016-02-20 10:35:20 +08003023 ptr = qemu_ram_ptr_length(mr->ram_block, raddr + base, plen);
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01003024 rcu_read_unlock();
3025
3026 return ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00003027}
3028
Avi Kivityac1970f2012-10-03 16:22:53 +02003029/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00003030 * Will also mark the memory as dirty if is_write == 1. access_len gives
3031 * the amount of memory that was actually read or written by the caller.
3032 */
Avi Kivitya8170e52012-10-23 12:30:10 +02003033void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
3034 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00003035{
3036 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02003037 MemoryRegion *mr;
3038 ram_addr_t addr1;
3039
3040 mr = qemu_ram_addr_from_host(buffer, &addr1);
3041 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00003042 if (is_write) {
Paolo Bonzini845b6212015-03-23 11:45:53 +01003043 invalidate_and_set_dirty(mr, addr1, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00003044 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003045 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003046 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003047 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02003048 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00003049 return;
3050 }
3051 if (is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01003052 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
3053 bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00003054 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00003055 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003056 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02003057 memory_region_unref(bounce.mr);
Fam Zhengc2cba0f2015-03-16 17:03:33 +08003058 atomic_mb_set(&bounce.in_use, false);
aliguoriba223c22009-01-22 16:59:16 +00003059 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00003060}
bellardd0ecd2a2006-04-23 17:14:48 +00003061
Avi Kivitya8170e52012-10-23 12:30:10 +02003062void *cpu_physical_memory_map(hwaddr addr,
3063 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02003064 int is_write)
3065{
3066 return address_space_map(&address_space_memory, addr, plen, is_write);
3067}
3068
Avi Kivitya8170e52012-10-23 12:30:10 +02003069void cpu_physical_memory_unmap(void *buffer, hwaddr len,
3070 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02003071{
3072 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
3073}
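/* Sketch of the usual zero-copy DMA pattern built on map/unmap (guest
 * address and length hypothetical):
 *
 *     hwaddr len = want;
 *     void *host = cpu_physical_memory_map(gpa, &len, 1);
 *     if (host) {
 *         memset(host, 0, len);               // at most 'len' bytes, may be < 'want'
 *         cpu_physical_memory_unmap(host, len, 1, len);
 *     }
 */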
3074
bellard8df1cd02005-01-28 22:37:22 +00003075/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003076static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
3077 MemTxAttrs attrs,
3078 MemTxResult *result,
3079 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003080{
bellard8df1cd02005-01-28 22:37:22 +00003081 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02003082 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003083 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003084 hwaddr l = 4;
3085 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003086 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003087 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003088
Paolo Bonzini41063e12015-03-18 14:21:43 +01003089 rcu_read_lock();
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003090 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003091 if (l < 4 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003092 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003093
bellard8df1cd02005-01-28 22:37:22 +00003094 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003095 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003096#if defined(TARGET_WORDS_BIGENDIAN)
3097 if (endian == DEVICE_LITTLE_ENDIAN) {
3098 val = bswap32(val);
3099 }
3100#else
3101 if (endian == DEVICE_BIG_ENDIAN) {
3102 val = bswap32(val);
3103 }
3104#endif
bellard8df1cd02005-01-28 22:37:22 +00003105 } else {
3106 /* RAM case */
Gonglei3655cb92016-02-20 10:35:20 +08003107 ptr = qemu_get_ram_ptr(mr->ram_block,
Paolo Bonzinie4e69792016-03-01 10:44:50 +01003108 memory_region_get_ram_addr(mr) + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003109 switch (endian) {
3110 case DEVICE_LITTLE_ENDIAN:
3111 val = ldl_le_p(ptr);
3112 break;
3113 case DEVICE_BIG_ENDIAN:
3114 val = ldl_be_p(ptr);
3115 break;
3116 default:
3117 val = ldl_p(ptr);
3118 break;
3119 }
Peter Maydell50013112015-04-26 16:49:24 +01003120 r = MEMTX_OK;
3121 }
3122 if (result) {
3123 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003124 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003125 if (release_lock) {
3126 qemu_mutex_unlock_iothread();
3127 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003128 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003129 return val;
3130}
3131
Peter Maydell50013112015-04-26 16:49:24 +01003132uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
3133 MemTxAttrs attrs, MemTxResult *result)
3134{
3135 return address_space_ldl_internal(as, addr, attrs, result,
3136 DEVICE_NATIVE_ENDIAN);
3137}
3138
3139uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
3140 MemTxAttrs attrs, MemTxResult *result)
3141{
3142 return address_space_ldl_internal(as, addr, attrs, result,
3143 DEVICE_LITTLE_ENDIAN);
3144}
3145
3146uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
3147 MemTxAttrs attrs, MemTxResult *result)
3148{
3149 return address_space_ldl_internal(as, addr, attrs, result,
3150 DEVICE_BIG_ENDIAN);
3151}
3152
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003153uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003154{
Peter Maydell50013112015-04-26 16:49:24 +01003155 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003156}
3157
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003158uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003159{
Peter Maydell50013112015-04-26 16:49:24 +01003160 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003161}
3162
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003163uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003164{
Peter Maydell50013112015-04-26 16:49:24 +01003165 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003166}
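/* The *_phys helpers are the attribute-less shorthands; callers that need to
 * observe bus errors use the address_space_ldl_*() variants above and check
 * the MemTxResult.  A sketch (descriptor address hypothetical):
 *
 *     uint32_t flags = ldl_le_phys(&address_space_memory, desc_gpa + 8);
 */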
3167
bellard84b7b8e2005-11-28 21:19:04 +00003168/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003169static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
3170 MemTxAttrs attrs,
3171 MemTxResult *result,
3172 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00003173{
bellard84b7b8e2005-11-28 21:19:04 +00003174 uint8_t *ptr;
3175 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003176 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003177 hwaddr l = 8;
3178 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003179 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003180 bool release_lock = false;
bellard84b7b8e2005-11-28 21:19:04 +00003181
Paolo Bonzini41063e12015-03-18 14:21:43 +01003182 rcu_read_lock();
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003183 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003184 false);
3185 if (l < 8 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003186 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003187
bellard84b7b8e2005-11-28 21:19:04 +00003188 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003189 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
Paolo Bonzini968a5622013-05-24 17:58:37 +02003190#if defined(TARGET_WORDS_BIGENDIAN)
3191 if (endian == DEVICE_LITTLE_ENDIAN) {
3192 val = bswap64(val);
3193 }
3194#else
3195 if (endian == DEVICE_BIG_ENDIAN) {
3196 val = bswap64(val);
3197 }
3198#endif
bellard84b7b8e2005-11-28 21:19:04 +00003199 } else {
3200 /* RAM case */
Gonglei3655cb92016-02-20 10:35:20 +08003201 ptr = qemu_get_ram_ptr(mr->ram_block,
Paolo Bonzinie4e69792016-03-01 10:44:50 +01003202 memory_region_get_ram_addr(mr) + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003203 switch (endian) {
3204 case DEVICE_LITTLE_ENDIAN:
3205 val = ldq_le_p(ptr);
3206 break;
3207 case DEVICE_BIG_ENDIAN:
3208 val = ldq_be_p(ptr);
3209 break;
3210 default:
3211 val = ldq_p(ptr);
3212 break;
3213 }
Peter Maydell50013112015-04-26 16:49:24 +01003214 r = MEMTX_OK;
3215 }
3216 if (result) {
3217 *result = r;
bellard84b7b8e2005-11-28 21:19:04 +00003218 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003219 if (release_lock) {
3220 qemu_mutex_unlock_iothread();
3221 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003222 rcu_read_unlock();
bellard84b7b8e2005-11-28 21:19:04 +00003223 return val;
3224}
3225
Peter Maydell50013112015-04-26 16:49:24 +01003226uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
3227 MemTxAttrs attrs, MemTxResult *result)
3228{
3229 return address_space_ldq_internal(as, addr, attrs, result,
3230 DEVICE_NATIVE_ENDIAN);
3231}
3232
3233uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
3234 MemTxAttrs attrs, MemTxResult *result)
3235{
3236 return address_space_ldq_internal(as, addr, attrs, result,
3237 DEVICE_LITTLE_ENDIAN);
3238}
3239
3240uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
3241 MemTxAttrs attrs, MemTxResult *result)
3242{
3243 return address_space_ldq_internal(as, addr, attrs, result,
3244 DEVICE_BIG_ENDIAN);
3245}
3246
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003247uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003248{
Peter Maydell50013112015-04-26 16:49:24 +01003249 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003250}
3251
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003252uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003253{
Peter Maydell50013112015-04-26 16:49:24 +01003254 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003255}
3256
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003257uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003258{
Peter Maydell50013112015-04-26 16:49:24 +01003259 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003260}
3261
bellardaab33092005-10-30 20:48:42 +00003262/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003263uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
3264 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003265{
3266 uint8_t val;
Peter Maydell50013112015-04-26 16:49:24 +01003267 MemTxResult r;
3268
3269 r = address_space_rw(as, addr, attrs, &val, 1, 0);
3270 if (result) {
3271 *result = r;
3272 }
bellardaab33092005-10-30 20:48:42 +00003273 return val;
3274}
3275
Peter Maydell50013112015-04-26 16:49:24 +01003276uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
3277{
3278 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3279}
3280
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003281/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003282static inline uint32_t address_space_lduw_internal(AddressSpace *as,
3283 hwaddr addr,
3284 MemTxAttrs attrs,
3285 MemTxResult *result,
3286 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003287{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003288 uint8_t *ptr;
3289 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003290 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003291 hwaddr l = 2;
3292 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003293 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003294 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003295
Paolo Bonzini41063e12015-03-18 14:21:43 +01003296 rcu_read_lock();
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003297 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003298 false);
3299 if (l < 2 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003300 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003301
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003302 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003303 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003304#if defined(TARGET_WORDS_BIGENDIAN)
3305 if (endian == DEVICE_LITTLE_ENDIAN) {
3306 val = bswap16(val);
3307 }
3308#else
3309 if (endian == DEVICE_BIG_ENDIAN) {
3310 val = bswap16(val);
3311 }
3312#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003313 } else {
3314 /* RAM case */
Gonglei3655cb92016-02-20 10:35:20 +08003315 ptr = qemu_get_ram_ptr(mr->ram_block,
Paolo Bonzinie4e69792016-03-01 10:44:50 +01003316 memory_region_get_ram_addr(mr) + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003317 switch (endian) {
3318 case DEVICE_LITTLE_ENDIAN:
3319 val = lduw_le_p(ptr);
3320 break;
3321 case DEVICE_BIG_ENDIAN:
3322 val = lduw_be_p(ptr);
3323 break;
3324 default:
3325 val = lduw_p(ptr);
3326 break;
3327 }
Peter Maydell50013112015-04-26 16:49:24 +01003328 r = MEMTX_OK;
3329 }
3330 if (result) {
3331 *result = r;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003332 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003333 if (release_lock) {
3334 qemu_mutex_unlock_iothread();
3335 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003336 rcu_read_unlock();
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003337 return val;
bellardaab33092005-10-30 20:48:42 +00003338}
3339
Peter Maydell50013112015-04-26 16:49:24 +01003340uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
3341 MemTxAttrs attrs, MemTxResult *result)
3342{
3343 return address_space_lduw_internal(as, addr, attrs, result,
3344 DEVICE_NATIVE_ENDIAN);
3345}
3346
3347uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
3348 MemTxAttrs attrs, MemTxResult *result)
3349{
3350 return address_space_lduw_internal(as, addr, attrs, result,
3351 DEVICE_LITTLE_ENDIAN);
3352}
3353
3354uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
3355 MemTxAttrs attrs, MemTxResult *result)
3356{
3357 return address_space_lduw_internal(as, addr, attrs, result,
3358 DEVICE_BIG_ENDIAN);
3359}
3360
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003361uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003362{
Peter Maydell50013112015-04-26 16:49:24 +01003363 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003364}
3365
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003366uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003367{
Peter Maydell50013112015-04-26 16:49:24 +01003368 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003369}
3370
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003371uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003372{
Peter Maydell50013112015-04-26 16:49:24 +01003373 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003374}
3375
bellard8df1cd02005-01-28 22:37:22 +00003376/* warning: addr must be aligned. The RAM page is not marked as dirty
3377 and the code inside is not invalidated. It is useful if the dirty
3378 bits are used to track modified PTEs */
Peter Maydell50013112015-04-26 16:49:24 +01003379void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3380 MemTxAttrs attrs, MemTxResult *result)
bellard8df1cd02005-01-28 22:37:22 +00003381{
bellard8df1cd02005-01-28 22:37:22 +00003382 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003383 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003384 hwaddr l = 4;
3385 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003386 MemTxResult r;
Paolo Bonzini845b6212015-03-23 11:45:53 +01003387 uint8_t dirty_log_mask;
Jan Kiszka4840f102015-06-18 18:47:22 +02003388 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003389
Paolo Bonzini41063e12015-03-18 14:21:43 +01003390 rcu_read_lock();
Edgar E. Iglesias2198a122013-11-28 10:13:41 +01003391 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003392 true);
3393 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003394 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003395
Peter Maydell50013112015-04-26 16:49:24 +01003396 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003397 } else {
Paolo Bonzinie4e69792016-03-01 10:44:50 +01003398 addr1 += memory_region_get_ram_addr(mr);
Gonglei3655cb92016-02-20 10:35:20 +08003399 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
bellard8df1cd02005-01-28 22:37:22 +00003400 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003401
Paolo Bonzini845b6212015-03-23 11:45:53 +01003402 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3403 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
Paolo Bonzini58d27072015-03-23 11:56:01 +01003404 cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
Peter Maydell50013112015-04-26 16:49:24 +01003405 r = MEMTX_OK;
3406 }
3407 if (result) {
3408 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003409 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003410 if (release_lock) {
3411 qemu_mutex_unlock_iothread();
3412 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003413 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003414}
3415
Peter Maydell50013112015-04-26 16:49:24 +01003416void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3417{
3418 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3419}
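/* Sketch of why the _notdirty variant exists (x86-style names, hypothetical
 * here): target code that rewrites a guest page-table entry in place wants
 * to skip the DIRTY_MEMORY_CODE bookkeeping and TB invalidation:
 *
 *     stl_phys_notdirty(cs->as, pte_addr, pte | PG_ACCESSED_MASK);
 */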
3420
bellard8df1cd02005-01-28 22:37:22 +00003421/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003422static inline void address_space_stl_internal(AddressSpace *as,
3423 hwaddr addr, uint32_t val,
3424 MemTxAttrs attrs,
3425 MemTxResult *result,
3426 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003427{
bellard8df1cd02005-01-28 22:37:22 +00003428 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003429 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003430 hwaddr l = 4;
3431 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003432 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003433 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003434
Paolo Bonzini41063e12015-03-18 14:21:43 +01003435 rcu_read_lock();
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003436 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003437 true);
3438 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003439 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003440
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003441#if defined(TARGET_WORDS_BIGENDIAN)
3442 if (endian == DEVICE_LITTLE_ENDIAN) {
3443 val = bswap32(val);
3444 }
3445#else
3446 if (endian == DEVICE_BIG_ENDIAN) {
3447 val = bswap32(val);
3448 }
3449#endif
Peter Maydell50013112015-04-26 16:49:24 +01003450 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003451 } else {
bellard8df1cd02005-01-28 22:37:22 +00003452 /* RAM case */
Paolo Bonzinie4e69792016-03-01 10:44:50 +01003453 addr1 += memory_region_get_ram_addr(mr);
Gonglei3655cb92016-02-20 10:35:20 +08003454 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003455 switch (endian) {
3456 case DEVICE_LITTLE_ENDIAN:
3457 stl_le_p(ptr, val);
3458 break;
3459 case DEVICE_BIG_ENDIAN:
3460 stl_be_p(ptr, val);
3461 break;
3462 default:
3463 stl_p(ptr, val);
3464 break;
3465 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003466 invalidate_and_set_dirty(mr, addr1, 4);
Peter Maydell50013112015-04-26 16:49:24 +01003467 r = MEMTX_OK;
bellard8df1cd02005-01-28 22:37:22 +00003468 }
Peter Maydell50013112015-04-26 16:49:24 +01003469 if (result) {
3470 *result = r;
3471 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003472 if (release_lock) {
3473 qemu_mutex_unlock_iothread();
3474 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003475 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003476}
3477
3478void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3479 MemTxAttrs attrs, MemTxResult *result)
3480{
3481 address_space_stl_internal(as, addr, val, attrs, result,
3482 DEVICE_NATIVE_ENDIAN);
3483}
3484
3485void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3486 MemTxAttrs attrs, MemTxResult *result)
3487{
3488 address_space_stl_internal(as, addr, val, attrs, result,
3489 DEVICE_LITTLE_ENDIAN);
3490}
3491
3492void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3493 MemTxAttrs attrs, MemTxResult *result)
3494{
3495 address_space_stl_internal(as, addr, val, attrs, result,
3496 DEVICE_BIG_ENDIAN);
bellard8df1cd02005-01-28 22:37:22 +00003497}
3498
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003499void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003500{
Peter Maydell50013112015-04-26 16:49:24 +01003501 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003502}
3503
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003504void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003505{
Peter Maydell50013112015-04-26 16:49:24 +01003506 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003507}
3508
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003509void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003510{
Peter Maydell50013112015-04-26 16:49:24 +01003511 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003512}
3513
bellardaab33092005-10-30 20:48:42 +00003514/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003515void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3516 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003517{
3518 uint8_t v = val;
Peter Maydell50013112015-04-26 16:49:24 +01003519 MemTxResult r;
3520
3521 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3522 if (result) {
3523 *result = r;
3524 }
3525}
3526
3527void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3528{
3529 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003530}
3531
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003532/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003533static inline void address_space_stw_internal(AddressSpace *as,
3534 hwaddr addr, uint32_t val,
3535 MemTxAttrs attrs,
3536 MemTxResult *result,
3537 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003538{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003539 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003540 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003541 hwaddr l = 2;
3542 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003543 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003544 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003545
Paolo Bonzini41063e12015-03-18 14:21:43 +01003546 rcu_read_lock();
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003547 mr = address_space_translate(as, addr, &addr1, &l, true);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003548 if (l < 2 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003549 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003550
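        /* memory_region_dispatch_write() takes the value in the target's
         * native byte order, so byte-swap first if the caller asked for
         * the opposite endianness. */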
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003551#if defined(TARGET_WORDS_BIGENDIAN)
3552 if (endian == DEVICE_LITTLE_ENDIAN) {
3553 val = bswap16(val);
3554 }
3555#else
3556 if (endian == DEVICE_BIG_ENDIAN) {
3557 val = bswap16(val);
3558 }
3559#endif
Peter Maydell50013112015-04-26 16:49:24 +01003560 r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003561 } else {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003562 /* RAM case */
Paolo Bonzinie4e69792016-03-01 10:44:50 +01003563 addr1 += memory_region_get_ram_addr(mr);
Gonglei3655cb92016-02-20 10:35:20 +08003564 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003565 switch (endian) {
3566 case DEVICE_LITTLE_ENDIAN:
3567 stw_le_p(ptr, val);
3568 break;
3569 case DEVICE_BIG_ENDIAN:
3570 stw_be_p(ptr, val);
3571 break;
3572 default:
3573 stw_p(ptr, val);
3574 break;
3575 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003576 invalidate_and_set_dirty(mr, addr1, 2);
Peter Maydell50013112015-04-26 16:49:24 +01003577 r = MEMTX_OK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003578 }
Peter Maydell50013112015-04-26 16:49:24 +01003579 if (result) {
3580 *result = r;
3581 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003582 if (release_lock) {
3583 qemu_mutex_unlock_iothread();
3584 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003585 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003586}
3587
3588void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
3589 MemTxAttrs attrs, MemTxResult *result)
3590{
3591 address_space_stw_internal(as, addr, val, attrs, result,
3592 DEVICE_NATIVE_ENDIAN);
3593}
3594
3595void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
3596 MemTxAttrs attrs, MemTxResult *result)
3597{
3598 address_space_stw_internal(as, addr, val, attrs, result,
3599 DEVICE_LITTLE_ENDIAN);
3600}
3601
3602void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
3603 MemTxAttrs attrs, MemTxResult *result)
3604{
3605 address_space_stw_internal(as, addr, val, attrs, result,
3606 DEVICE_BIG_ENDIAN);
bellardaab33092005-10-30 20:48:42 +00003607}
3608
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003609void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003610{
Peter Maydell50013112015-04-26 16:49:24 +01003611 address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003612}
3613
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003614void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003615{
Peter Maydell50013112015-04-26 16:49:24 +01003616 address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003617}
3618
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003619void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003620{
Peter Maydell50013112015-04-26 16:49:24 +01003621 address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003622}
3623
bellardaab33092005-10-30 20:48:42 +00003624/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003625void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
3626 MemTxAttrs attrs, MemTxResult *result)
3627{
3628 MemTxResult r;
3629 val = tswap64(val);
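    /* tswap64() converts to the guest's byte order (a no-op when host
     * and target already match); address_space_rw() then copies the
     * raw bytes through the generic path. */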
3630 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3631 if (result) {
3632 *result = r;
3633 }
3634}
3635
3636void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
3637 MemTxAttrs attrs, MemTxResult *result)
3638{
3639 MemTxResult r;
3640 val = cpu_to_le64(val);
3641 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3642 if (result) {
3643 *result = r;
3644 }
3645}

3646void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
3647 MemTxAttrs attrs, MemTxResult *result)
3648{
3649 MemTxResult r;
3650 val = cpu_to_be64(val);
3651 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3652 if (result) {
3653 *result = r;
3654 }
3655}
3656
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003657void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00003658{
Peter Maydell50013112015-04-26 16:49:24 +01003659 address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003660}
3661
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003662void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003663{
Peter Maydell50013112015-04-26 16:49:24 +01003664 address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003665}
3666
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003667void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003668{
Peter Maydell50013112015-04-26 16:49:24 +01003669 address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003670}
3671
aliguori5e2972f2009-03-28 17:51:36 +00003672/* virtual memory access for debug (includes writing to ROM) */
Andreas Färberf17ec442013-06-29 19:40:58 +02003673int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00003674 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003675{
3676 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02003677 hwaddr phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00003678 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00003679
3680 while (len > 0) {
Peter Maydell5232e4c2016-01-21 14:15:06 +00003681 int asidx;
3682 MemTxAttrs attrs;
3683
bellard13eb76e2004-01-24 15:23:36 +00003684 page = addr & TARGET_PAGE_MASK;
Peter Maydell5232e4c2016-01-21 14:15:06 +00003685 phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
3686 asidx = cpu_asidx_from_attrs(cpu, attrs);
bellard13eb76e2004-01-24 15:23:36 +00003687 /* if no physical page mapped, return an error */
3688 if (phys_addr == -1)
3689 return -1;
3690 l = (page + TARGET_PAGE_SIZE) - addr;
3691 if (l > len)
3692 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00003693 phys_addr += (addr & ~TARGET_PAGE_MASK);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003694 if (is_write) {
Peter Maydell5232e4c2016-01-21 14:15:06 +00003695 cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
3696 phys_addr, buf, l);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003697 } else {
Peter Maydell5232e4c2016-01-21 14:15:06 +00003698 address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
3699 MEMTXATTRS_UNSPECIFIED,
Peter Maydell5c9eb022015-04-26 16:49:24 +01003700 buf, l, 0);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003701 }
bellard13eb76e2004-01-24 15:23:36 +00003702 len -= l;
3703 buf += l;
3704 addr += l;
3705 }
3706 return 0;
3707}
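
/*
 * Sketch of a typical caller (locals and sizes are illustrative): the
 * gdbstub reads guest virtual memory through this path, roughly:
 *
 *     uint8_t buf[64];
 *     if (cpu_memory_rw_debug(cpu, vaddr, buf, sizeof(buf), 0) < 0) {
 *         ... report an unmapped address back to the debugger ...
 *     }
 *
 * is_write = 1 goes through cpu_physical_memory_write_rom() so that a
 * debugger can patch breakpoints even into ROM-backed pages.
 */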
Dr. David Alan Gilbert038629a2015-11-05 18:10:29 +00003708
3709/*
3710 * Allows code that needs to deal with migration bitmaps etc. to still be
3711 * built target-independent.
3712 */
3713size_t qemu_target_page_bits(void)
3714{
3715 return TARGET_PAGE_BITS;
3716}
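
/*
 * A target-independent caller (migration code, for instance) can then
 * recover the page size without pulling in target headers:
 *
 *     size_t page_size = (size_t)1 << qemu_target_page_bits();
 */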
3717
Paul Brooka68fe892010-03-01 00:08:59 +00003718#endif
bellard13eb76e2004-01-24 15:23:36 +00003719
Blue Swirl8e4a4242013-01-06 18:30:17 +00003720/*
3721 * A helper function for the _utterly broken_ virtio device model to find out if
3722 * it's running on a big endian machine. Don't do this at home kids!
3723 */
Greg Kurz98ed8ec2014-06-24 19:26:29 +02003724bool target_words_bigendian(void);
3725bool target_words_bigendian(void)
Blue Swirl8e4a4242013-01-06 18:30:17 +00003726{
3727#if defined(TARGET_WORDS_BIGENDIAN)
3728 return true;
3729#else
3730 return false;
3731#endif
3732}
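
/*
 * Sketch of the kind of caller the comment above has in mind (the enum
 * names are from the virtio core and may differ slightly): legacy
 * virtio picks its default device byte order from the target, roughly:
 *
 *     enum virtio_device_endian e = target_words_bigendian()
 *         ? VIRTIO_DEVICE_ENDIAN_BIG : VIRTIO_DEVICE_ENDIAN_LITTLE;
 */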
3733
Wen Congyang76f35532012-05-07 12:04:18 +08003734#ifndef CONFIG_USER_ONLY
Avi Kivitya8170e52012-10-23 12:30:10 +02003735bool cpu_physical_memory_is_io(hwaddr phys_addr)
Wen Congyang76f35532012-05-07 12:04:18 +08003736{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003737 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003738 hwaddr l = 1;
Paolo Bonzini41063e12015-03-18 14:21:43 +01003739 bool res;
Wen Congyang76f35532012-05-07 12:04:18 +08003740
Paolo Bonzini41063e12015-03-18 14:21:43 +01003741 rcu_read_lock();
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003742 mr = address_space_translate(&address_space_memory,
3743 phys_addr, &phys_addr, &l, false);
Wen Congyang76f35532012-05-07 12:04:18 +08003744
Paolo Bonzini41063e12015-03-18 14:21:43 +01003745 res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
3746 rcu_read_unlock();
3747 return res;
Wen Congyang76f35532012-05-07 12:04:18 +08003748}
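
/*
 * Illustrative check (the address is a placeholder): callers use this
 * to skip guest-physical pages that are backed by a device rather than
 * by RAM or ROM:
 *
 *     if (cpu_physical_memory_is_io(paddr)) {
 *         ... not plain memory, leave it alone ...
 *     }
 */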
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003749
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003750int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003751{
3752 RAMBlock *block;
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003753 int ret = 0;
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003754
Mike Day0dc3f442013-09-05 14:41:35 -04003755 rcu_read_lock();
3756 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003757 ret = func(block->idstr, block->host, block->offset,
3758 block->used_length, opaque);
3759 if (ret) {
3760 break;
3761 }
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003762 }
Mike Day0dc3f442013-09-05 14:41:35 -04003763 rcu_read_unlock();
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003764 return ret;
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003765}
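
/*
 * Sketch of a RAMBlockIterFunc callback (names are illustrative): the
 * walk runs under the RCU read lock and stops at the first non-zero
 * return value:
 *
 *     static int dump_block(const char *idstr, void *host, ram_addr_t offset,
 *                           ram_addr_t length, void *opaque)
 *     {
 *         fprintf(opaque, "%s @ %p, " RAM_ADDR_FMT " bytes\n",
 *                 idstr, host, length);
 *         return 0;
 *     }
 *
 *     qemu_ram_foreach_block(dump_block, stderr);
 */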
Peter Maydellec3f8c92013-06-27 20:53:38 +01003766#endif