/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#ifndef _WIN32
#include <sys/mman.h>
#endif

#include "qemu/cutils.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/qdev-core.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "hw/hw.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "exec/log.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
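
/* For example, with ADDR_SPACE_BITS of 64, P_L2_BITS of 9 and a 4 KiB
 * target page size (TARGET_PAGE_BITS == 12), this works out to
 * ((64 - 12 - 1) / 9) + 1 = 6 levels, each level indexing one of
 * P_L2_SIZE == 512 child entries.
 */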

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

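/* phys_page_set_level() and phys_page_set() populate the radix tree: they
 * mark the run of page indexes [*index, *index + *nb) as pointing at
 * section number 'leaf'.  At each level, a whole aligned subtree of 'step'
 * pages that fits inside the remaining range becomes a leaf entry
 * directly; anything smaller recurses one level further down, allocating
 * intermediate nodes on demand.
 */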
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

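/* For a section whose Int128 size fits in 64 bits (size.hi == 0), the
 * check below reduces to offset_within_address_space <= addr <
 * offset_within_address_space + size.lo.  The only way size.hi can be
 * non-zero is a section of exactly 2^64 bytes, which by construction
 * matches every address.
 */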
static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return section->size.hi ||
           range_covers_byte(section->offset_within_address_space,
                             section->size.lo, addr);
}

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = atomic_read(&d->mru_section);
    subpage_t *subpage;
    bool update;

    if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
        section_covers_addr(section, addr)) {
        update = false;
    } else {
        section = phys_page_find(d->phys_map, addr, d->map.nodes,
                                 d->map.sections);
        update = true;
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    if (update) {
        atomic_set(&d->mru_section, section);
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}
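
/* Worked example of the IOMMU step above, using made-up numbers: if the
 * IOMMU returns translated_addr 0x80001000 with addr_mask 0xfff for an
 * input address ending in 0x678, the recombined address is 0x80001678,
 * and *plen is clamped to the 0x988 bytes that remain before the 0xfff
 * boundary of that translation.
 */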

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
#endif

#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

static int cpu_get_free_index(Error **errp)
{
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
                   MAX_CPUMASK_BITS);
        return -1;
    }

    bitmap_set(cpu_index_map, cpu, 1);
    return cpu;
}

void cpu_exec_exit(CPUState *cpu)
{
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
        return;
    }

    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
    cpu->cpu_index = -1;
}
#else

static int cpu_get_free_index(Error **errp)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

void cpu_exec_exit(CPUState *cpu)
{
}
#endif

void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    Error *local_err = NULL;

    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();

    /* This is a softmmu CPU object, so create a property for it
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
                             (Object **)&cpu->memory,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));
#endif

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    (void) cc;
    cpu_list_unlock();
#else
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
    }
#endif
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    MemTxAttrs attrs;
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)

{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
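
/* For instance, on a 64-bit vaddr a watchpoint that ends at the very top
 * of the address space (wp->vaddr + wp->len wraps to zero) would make a
 * naive "addr < wp->vaddr + wp->len" test always fail; comparing against
 * the inclusive end addresses computed above avoids that corner case.
 */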

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *     xxx removed from list
     *                  rcu_read_lock()
     *                  read mru_block
     *                      mru_block = NULL;
     *                      call_rcu(reclaim_ramblock, xxx);
     *                  rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    bool dirty = false;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
                                              offset, num);
        page += num;
    }

    rcu_read_unlock();

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}
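
/* Concretely: with 4 KiB target pages, for example, at most 4096 sections
 * can exist per dispatch map, so a section number always fits in the low
 * bits that a page-aligned address leaves clear, and the OR described
 * above cannot corrupt the pointer part of an iotlb entry.
 */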

static void phys_section_destroy(MemoryRegion *mr)
{
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}


static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001198 }
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001199 while (int128_ne(remain.size, now.size)) {
1200 remain.size = int128_sub(remain.size, now.size);
1201 remain.offset_within_address_space += int128_get64(now.size);
1202 remain.offset_within_region += int128_get64(now.size);
Tyler Hall69b67642012-07-25 18:45:04 -04001203 now = remain;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001204 if (int128_lt(remain.size, page_size)) {
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001205 register_subpage(d, &now);
Hu Tao88266242013-08-29 18:21:16 +08001206 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001207 now.size = page_size;
Avi Kivityac1970f2012-10-03 16:22:53 +02001208 register_subpage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001209 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001210 now.size = int128_and(now.size, int128_neg(page_size));
Avi Kivityac1970f2012-10-03 16:22:53 +02001211 register_multipage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001212 }
Avi Kivity0f0cb162012-02-13 17:14:32 +02001213 }
1214}
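/*
 * Worked example (illustrative numbers, assuming a TARGET_PAGE_SIZE of
 * 0x1000): a section starting at 0x1800 with size 0x3000 is registered by
 * the loop above as
 *   subpage   [0x1800, 0x2000)  - the unaligned head, 0x800 bytes
 *   multipage [0x2000, 0x4000)  - the page-aligned middle, 0x2000 bytes
 *   subpage   [0x4000, 0x4800)  - the sub-page tail, 0x800 bytes
 */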
1215
Sheng Yang62a27442010-01-26 19:21:16 +08001216void qemu_flush_coalesced_mmio_buffer(void)
1217{
1218 if (kvm_enabled())
1219 kvm_flush_coalesced_mmio_buffer();
1220}
1221
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001222void qemu_mutex_lock_ramlist(void)
1223{
1224 qemu_mutex_lock(&ram_list.mutex);
1225}
1226
1227void qemu_mutex_unlock_ramlist(void)
1228{
1229 qemu_mutex_unlock(&ram_list.mutex);
1230}
1231
Markus Armbrustere1e84ba2013-07-31 15:11:10 +02001232#ifdef __linux__
Alex Williamson04b16652010-07-02 11:13:17 -06001233static void *file_ram_alloc(RAMBlock *block,
1234 ram_addr_t memory,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001235 const char *path,
1236 Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001237{
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001238 bool unlink_on_error = false;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001239 char *filename;
Peter Feiner8ca761f2013-03-04 13:54:25 -05001240 char *sanitized_name;
1241 char *c;
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +03001242 void *area;
Paolo Bonzini5c3ece72016-03-17 15:53:13 +01001243 int fd = -1;
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001244 int64_t page_size;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001245
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001246 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1247 error_setg(errp,
1248 "host lacks kvm mmu notifiers, -mem-path unsupported");
1249 return NULL;
1250 }
1251
1252 for (;;) {
1253 fd = open(path, O_RDWR);
1254 if (fd >= 0) {
1255 /* @path names an existing file, use it */
1256 break;
1257 }
1258 if (errno == ENOENT) {
1259 /* @path names a file that doesn't exist, create it */
1260 fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
1261 if (fd >= 0) {
1262 unlink_on_error = true;
1263 break;
1264 }
1265 } else if (errno == EISDIR) {
1266 /* @path names a directory, create a file there */
1267 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1268 sanitized_name = g_strdup(memory_region_name(block->mr));
1269 for (c = sanitized_name; *c != '\0'; c++) {
1270 if (*c == '/') {
1271 *c = '_';
1272 }
1273 }
1274
1275 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1276 sanitized_name);
1277 g_free(sanitized_name);
1278
1279 fd = mkstemp(filename);
1280 if (fd >= 0) {
1281 unlink(filename);
1282 g_free(filename);
1283 break;
1284 }
1285 g_free(filename);
1286 }
1287 if (errno != EEXIST && errno != EINTR) {
1288 error_setg_errno(errp, errno,
1289 "can't open backing store %s for guest RAM",
1290 path);
1291 goto error;
1292 }
1293 /*
1294 * Try again on EINTR and EEXIST. The latter happens when
1295 * something else creates the file between our two open() calls.
1296 */
1297 }
1298
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001299 page_size = qemu_fd_getpagesize(fd);
1300 block->mr->align = page_size;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001301
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001302 if (memory < page_size) {
Hu Tao557529d2014-09-09 13:28:00 +08001303 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001304 "or larger than page size 0x%" PRIx64,
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001305 memory, page_size);
Hu Tao557529d2014-09-09 13:28:00 +08001306 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001307 }
1308
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001309 memory = ROUND_UP(memory, page_size);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001310
1311 /*
1312 * ftruncate is not supported by hugetlbfs in older
1313 * hosts, so don't bother bailing out on errors.
1314 * If anything goes wrong with it under other filesystems,
1315 * mmap will fail.
1316 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001317 if (ftruncate(fd, memory)) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001318 perror("ftruncate");
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001319 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001320
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001321 area = qemu_ram_mmap(fd, memory, page_size, block->flags & RAM_SHARED);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001322 if (area == MAP_FAILED) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001323 error_setg_errno(errp, errno,
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001324 "unable to map backing store for guest RAM");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001325 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001326 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001327
1328 if (mem_prealloc) {
Paolo Bonzini38183312014-05-14 17:43:21 +08001329 os_mem_prealloc(fd, area, memory);
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001330 }
1331
Alex Williamson04b16652010-07-02 11:13:17 -06001332 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001333 return area;
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001334
1335error:
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001336 if (unlink_on_error) {
1337 unlink(path);
1338 }
Paolo Bonzini5c3ece72016-03-17 15:53:13 +01001339 if (fd != -1) {
1340 close(fd);
1341 }
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001342 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001343}
1344#endif
1345
Mike Day0dc3f442013-09-05 14:41:35 -04001346/* Called with the ramlist lock held. */
Alex Williamsond17b5282010-06-25 11:08:38 -06001347static ram_addr_t find_ram_offset(ram_addr_t size)
1348{
Alex Williamson04b16652010-07-02 11:13:17 -06001349 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001350 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001351
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001352 assert(size != 0); /* it would hand out same offset multiple times */
1353
Mike Day0dc3f442013-09-05 14:41:35 -04001354 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
Alex Williamson04b16652010-07-02 11:13:17 -06001355 return 0;
Mike Day0d53d9f2015-01-21 13:45:24 +01001356 }
Alex Williamson04b16652010-07-02 11:13:17 -06001357
Mike Day0dc3f442013-09-05 14:41:35 -04001358 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001359 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001360
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001361 end = block->offset + block->max_length;
Alex Williamson04b16652010-07-02 11:13:17 -06001362
Mike Day0dc3f442013-09-05 14:41:35 -04001363 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001364 if (next_block->offset >= end) {
1365 next = MIN(next, next_block->offset);
1366 }
1367 }
1368 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001369 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001370 mingap = next - end;
1371 }
1372 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001373
1374 if (offset == RAM_ADDR_MAX) {
1375 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1376 (uint64_t)size);
1377 abort();
1378 }
1379
Alex Williamson04b16652010-07-02 11:13:17 -06001380 return offset;
1381}
1382
Juan Quintela652d7ec2012-07-20 10:37:54 +02001383ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001384{
Alex Williamsond17b5282010-06-25 11:08:38 -06001385 RAMBlock *block;
1386 ram_addr_t last = 0;
1387
Mike Day0dc3f442013-09-05 14:41:35 -04001388 rcu_read_lock();
1389 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001390 last = MAX(last, block->offset + block->max_length);
Mike Day0d53d9f2015-01-21 13:45:24 +01001391 }
Mike Day0dc3f442013-09-05 14:41:35 -04001392 rcu_read_unlock();
Alex Williamsond17b5282010-06-25 11:08:38 -06001393 return last;
1394}
1395
Jason Baronddb97f12012-08-02 15:44:16 -04001396static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1397{
1398 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001399
1400 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
Marcel Apfelbaum47c8ca52015-02-04 17:43:54 +02001401 if (!machine_dump_guest_core(current_machine)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001402 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1403 if (ret) {
1404 perror("qemu_madvise");
1405 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1406 "but dump_guest_core=off specified\n");
1407 }
1408 }
1409}
1410
Mike Day0dc3f442013-09-05 14:41:35 -04001411/* Called within an RCU critical section, or while the ramlist lock
1412 * is held.
1413 */
Hu Tao20cfe882014-04-02 15:13:26 +08001414static RAMBlock *find_ram_block(ram_addr_t addr)
Cam Macdonell84b89d72010-07-26 18:10:57 -06001415{
Hu Tao20cfe882014-04-02 15:13:26 +08001416 RAMBlock *block;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001417
Mike Day0dc3f442013-09-05 14:41:35 -04001418 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001419 if (block->offset == addr) {
Hu Tao20cfe882014-04-02 15:13:26 +08001420 return block;
Avi Kivityc5705a72011-12-20 15:59:12 +02001421 }
1422 }
Hu Tao20cfe882014-04-02 15:13:26 +08001423
1424 return NULL;
1425}
1426
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001427const char *qemu_ram_get_idstr(RAMBlock *rb)
1428{
1429 return rb->idstr;
1430}
1431
Mike Dayae3a7042013-09-05 14:41:35 -04001432/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001433void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1434{
Mike Dayae3a7042013-09-05 14:41:35 -04001435 RAMBlock *new_block, *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001436
Mike Day0dc3f442013-09-05 14:41:35 -04001437 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001438 new_block = find_ram_block(addr);
Avi Kivityc5705a72011-12-20 15:59:12 +02001439 assert(new_block);
1440 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001441
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001442 if (dev) {
1443 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001444 if (id) {
1445 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001446 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001447 }
1448 }
1449 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1450
Mike Day0dc3f442013-09-05 14:41:35 -04001451 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001452 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001453 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1454 new_block->idstr);
1455 abort();
1456 }
1457 }
Mike Day0dc3f442013-09-05 14:41:35 -04001458 rcu_read_unlock();
Avi Kivityc5705a72011-12-20 15:59:12 +02001459}
1460
Mike Dayae3a7042013-09-05 14:41:35 -04001461/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001462void qemu_ram_unset_idstr(ram_addr_t addr)
1463{
Mike Dayae3a7042013-09-05 14:41:35 -04001464 RAMBlock *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001465
Mike Dayae3a7042013-09-05 14:41:35 -04001466 /* FIXME: arch_init.c assumes that this is not called throughout
1467 * migration. Ignore the problem since hot-unplug during migration
1468 * does not work anyway.
1469 */
1470
Mike Day0dc3f442013-09-05 14:41:35 -04001471 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001472 block = find_ram_block(addr);
Hu Tao20cfe882014-04-02 15:13:26 +08001473 if (block) {
1474 memset(block->idstr, 0, sizeof(block->idstr));
1475 }
Mike Day0dc3f442013-09-05 14:41:35 -04001476 rcu_read_unlock();
Hu Tao20cfe882014-04-02 15:13:26 +08001477}
1478
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001479static int memory_try_enable_merging(void *addr, size_t len)
1480{
Marcel Apfelbaum75cc7f02015-02-04 17:43:55 +02001481 if (!machine_mem_merge(current_machine)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001482 /* disabled by the user */
1483 return 0;
1484 }
1485
1486 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1487}
1488
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001489/* Only legal before guest might have detected the memory size: e.g. on
1490 * incoming migration, or right after reset.
1491 *
1492 * As the memory core doesn't know how memory is accessed, it is up to the
1493 * resize callback to update device state and/or add assertions to detect
1494 * misuse, if necessary.
1495 */
1496int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
1497{
1498 RAMBlock *block = find_ram_block(base);
1499
1500 assert(block);
1501
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001502 newsize = HOST_PAGE_ALIGN(newsize);
Michael S. Tsirkin129ddaf2015-02-17 10:15:30 +01001503
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001504 if (block->used_length == newsize) {
1505 return 0;
1506 }
1507
1508 if (!(block->flags & RAM_RESIZEABLE)) {
1509 error_setg_errno(errp, EINVAL,
1510 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1511 " in != 0x" RAM_ADDR_FMT, block->idstr,
1512 newsize, block->used_length);
1513 return -EINVAL;
1514 }
1515
1516 if (block->max_length < newsize) {
1517 error_setg_errno(errp, EINVAL,
1518 "Length too large: %s: 0x" RAM_ADDR_FMT
1519 " > 0x" RAM_ADDR_FMT, block->idstr,
1520 newsize, block->max_length);
1521 return -EINVAL;
1522 }
1523
1524 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1525 block->used_length = newsize;
Paolo Bonzini58d27072015-03-23 11:56:01 +01001526 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1527 DIRTY_CLIENTS_ALL);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001528 memory_region_set_size(block->mr, newsize);
1529 if (block->resized) {
1530 block->resized(block->idstr, newsize, block->host);
1531 }
1532 return 0;
1533}
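/*
 * Usage sketch (not part of the original file): resizing a RAM_RESIZEABLE
 * block, e.g. from an incoming-migration path.  The base address and new
 * size are invented for the example; qemu_ram_resize() is the function
 * defined above and fails with -EINVAL for non-resizeable blocks or sizes
 * beyond max_length.
 */
#if 0
static void example_resize(ram_addr_t base, ram_addr_t new_size)
{
    Error *err = NULL;

    if (qemu_ram_resize(base, new_size, &err) < 0) {
        error_report_err(err);
    }
}
#endif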
1534
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001535/* Called with ram_list.mutex held */
1536static void dirty_memory_extend(ram_addr_t old_ram_size,
1537 ram_addr_t new_ram_size)
1538{
1539 ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
1540 DIRTY_MEMORY_BLOCK_SIZE);
1541 ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
1542 DIRTY_MEMORY_BLOCK_SIZE);
1543 int i;
1544
1545 /* Only need to extend if block count increased */
1546 if (new_num_blocks <= old_num_blocks) {
1547 return;
1548 }
1549
1550 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1551 DirtyMemoryBlocks *old_blocks;
1552 DirtyMemoryBlocks *new_blocks;
1553 int j;
1554
1555 old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
1556 new_blocks = g_malloc(sizeof(*new_blocks) +
1557 sizeof(new_blocks->blocks[0]) * new_num_blocks);
1558
1559 if (old_num_blocks) {
1560 memcpy(new_blocks->blocks, old_blocks->blocks,
1561 old_num_blocks * sizeof(old_blocks->blocks[0]));
1562 }
1563
1564 for (j = old_num_blocks; j < new_num_blocks; j++) {
1565 new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
1566 }
1567
1568 atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
1569
1570 if (old_blocks) {
1571 g_free_rcu(old_blocks, rcu);
1572 }
1573 }
1574}
1575
Fam Zheng528f46a2016-03-01 14:18:18 +08001576static void ram_block_add(RAMBlock *new_block, Error **errp)
Avi Kivityc5705a72011-12-20 15:59:12 +02001577{
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001578 RAMBlock *block;
Mike Day0d53d9f2015-01-21 13:45:24 +01001579 RAMBlock *last_block = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001580 ram_addr_t old_ram_size, new_ram_size;
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001581 Error *err = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001582
1583 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001584
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001585 qemu_mutex_lock_ramlist();
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001586 new_block->offset = find_ram_offset(new_block->max_length);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001587
1588 if (!new_block->host) {
1589 if (xen_enabled()) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001590 xen_ram_alloc(new_block->offset, new_block->max_length,
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001591 new_block->mr, &err);
1592 if (err) {
1593 error_propagate(errp, err);
1594 qemu_mutex_unlock_ramlist();
Paolo Bonzini39c350e2016-03-09 18:14:01 +01001595 return;
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001596 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001597 } else {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001598 new_block->host = phys_mem_alloc(new_block->max_length,
Igor Mammedova2b257d2014-10-31 16:38:37 +00001599 &new_block->mr->align);
Markus Armbruster39228252013-07-31 15:11:11 +02001600 if (!new_block->host) {
Hu Taoef701d72014-09-09 13:27:54 +08001601 error_setg_errno(errp, errno,
1602 "cannot set up guest memory '%s'",
1603 memory_region_name(new_block->mr));
1604 qemu_mutex_unlock_ramlist();
Paolo Bonzini39c350e2016-03-09 18:14:01 +01001605 return;
Markus Armbruster39228252013-07-31 15:11:11 +02001606 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001607 memory_try_enable_merging(new_block->host, new_block->max_length);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001608 }
1609 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001610
Li Zhijiandd631692015-07-02 20:18:06 +08001611 new_ram_size = MAX(old_ram_size,
1612 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1613 if (new_ram_size > old_ram_size) {
1614 migration_bitmap_extend(old_ram_size, new_ram_size);
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001615 dirty_memory_extend(old_ram_size, new_ram_size);
Li Zhijiandd631692015-07-02 20:18:06 +08001616 }
Mike Day0d53d9f2015-01-21 13:45:24 +01001617 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1618 * QLIST (which has an RCU-friendly variant) does not have insertion at
1619 * tail, so save the last element in last_block.
1620 */
Mike Day0dc3f442013-09-05 14:41:35 -04001621 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Mike Day0d53d9f2015-01-21 13:45:24 +01001622 last_block = block;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001623 if (block->max_length < new_block->max_length) {
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001624 break;
1625 }
1626 }
1627 if (block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001628 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001629 } else if (last_block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001630 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001631 } else { /* list is empty */
Mike Day0dc3f442013-09-05 14:41:35 -04001632 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001633 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001634 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001635
Mike Day0dc3f442013-09-05 14:41:35 -04001636 /* Write list before version */
1637 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001638 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001639 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001640
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001641 cpu_physical_memory_set_dirty_range(new_block->offset,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001642 new_block->used_length,
1643 DIRTY_CLIENTS_ALL);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001644
Paolo Bonzinia904c912015-01-21 16:18:35 +01001645 if (new_block->host) {
1646 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1647 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1648 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1649 if (kvm_enabled()) {
1650 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1651 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001652 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001653}
1654
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001655#ifdef __linux__
Fam Zheng528f46a2016-03-01 14:18:18 +08001656RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
1657 bool share, const char *mem_path,
1658 Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001659{
1660 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001661 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001662
1663 if (xen_enabled()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001664 error_setg(errp, "-mem-path not supported with Xen");
Fam Zheng528f46a2016-03-01 14:18:18 +08001665 return NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001666 }
1667
1668 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1669 /*
1670 * file_ram_alloc() needs to allocate just like
1671 * phys_mem_alloc, but we haven't bothered to provide
1672 * a hook there.
1673 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001674 error_setg(errp,
1675 "-mem-path not supported with this accelerator");
Fam Zheng528f46a2016-03-01 14:18:18 +08001676 return NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001677 }
1678
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001679 size = HOST_PAGE_ALIGN(size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001680 new_block = g_malloc0(sizeof(*new_block));
1681 new_block->mr = mr;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001682 new_block->used_length = size;
1683 new_block->max_length = size;
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001684 new_block->flags = share ? RAM_SHARED : 0;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001685 new_block->host = file_ram_alloc(new_block, size,
1686 mem_path, errp);
1687 if (!new_block->host) {
1688 g_free(new_block);
Fam Zheng528f46a2016-03-01 14:18:18 +08001689 return NULL;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001690 }
1691
Fam Zheng528f46a2016-03-01 14:18:18 +08001692 ram_block_add(new_block, &local_err);
Hu Taoef701d72014-09-09 13:27:54 +08001693 if (local_err) {
1694 g_free(new_block);
1695 error_propagate(errp, local_err);
Fam Zheng528f46a2016-03-01 14:18:18 +08001696 return NULL;
Hu Taoef701d72014-09-09 13:27:54 +08001697 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001698 return new_block;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001699}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001700#endif
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001701
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001702static
Fam Zheng528f46a2016-03-01 14:18:18 +08001703RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1704 void (*resized)(const char*,
1705 uint64_t length,
1706 void *host),
1707 void *host, bool resizeable,
1708 MemoryRegion *mr, Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001709{
1710 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001711 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001712
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001713 size = HOST_PAGE_ALIGN(size);
1714 max_size = HOST_PAGE_ALIGN(max_size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001715 new_block = g_malloc0(sizeof(*new_block));
1716 new_block->mr = mr;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001717 new_block->resized = resized;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001718 new_block->used_length = size;
1719 new_block->max_length = max_size;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001720 assert(max_size >= size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001721 new_block->fd = -1;
1722 new_block->host = host;
1723 if (host) {
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001724 new_block->flags |= RAM_PREALLOC;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001725 }
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001726 if (resizeable) {
1727 new_block->flags |= RAM_RESIZEABLE;
1728 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001729 ram_block_add(new_block, &local_err);
Hu Taoef701d72014-09-09 13:27:54 +08001730 if (local_err) {
1731 g_free(new_block);
1732 error_propagate(errp, local_err);
Fam Zheng528f46a2016-03-01 14:18:18 +08001733 return NULL;
Hu Taoef701d72014-09-09 13:27:54 +08001734 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001735 return new_block;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001736}
1737
Fam Zheng528f46a2016-03-01 14:18:18 +08001738RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001739 MemoryRegion *mr, Error **errp)
1740{
1741 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1742}
1743
Fam Zheng528f46a2016-03-01 14:18:18 +08001744RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
pbrook94a6b542009-04-11 17:15:54 +00001745{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001746 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1747}
1748
Fam Zheng528f46a2016-03-01 14:18:18 +08001749RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001750 void (*resized)(const char*,
1751 uint64_t length,
1752 void *host),
1753 MemoryRegion *mr, Error **errp)
1754{
1755 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
pbrook94a6b542009-04-11 17:15:54 +00001756}
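/*
 * Usage sketch (not part of the original file): allocating a resizeable
 * block whose callback is later invoked by qemu_ram_resize() above.  The
 * sizes and names are invented; "mr" is a MemoryRegion the caller already
 * owns.
 */
#if 0
static void example_resized(const char *id, uint64_t new_length, void *host)
{
    /* Device-specific bookkeeping on resize would go here. */
}

static RAMBlock *example_alloc_resizeable(MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_resizeable(16 * 1024 * 1024, 64 * 1024 * 1024,
                                     example_resized, mr, errp);
}
#endif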
bellarde9a1ab12007-02-08 23:08:38 +00001757
Paolo Bonzini43771532013-09-09 17:58:40 +02001758static void reclaim_ramblock(RAMBlock *block)
1759{
1760 if (block->flags & RAM_PREALLOC) {
1761 ;
1762 } else if (xen_enabled()) {
1763 xen_invalidate_map_cache_entry(block->host);
1764#ifndef _WIN32
1765 } else if (block->fd >= 0) {
Eduardo Habkost2f3a2bb2015-11-06 20:11:21 -02001766 qemu_ram_munmap(block->host, block->max_length);
Paolo Bonzini43771532013-09-09 17:58:40 +02001767 close(block->fd);
1768#endif
1769 } else {
1770 qemu_anon_ram_free(block->host, block->max_length);
1771 }
1772 g_free(block);
1773}
1774
Fam Zhengf1060c52016-03-01 14:18:22 +08001775void qemu_ram_free(RAMBlock *block)
bellarde9a1ab12007-02-08 23:08:38 +00001776{
Marc-André Lureau85bc2a12016-03-29 13:20:51 +02001777 if (!block) {
1778 return;
1779 }
1780
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001781 qemu_mutex_lock_ramlist();
Fam Zhengf1060c52016-03-01 14:18:22 +08001782 QLIST_REMOVE_RCU(block, next);
1783 ram_list.mru_block = NULL;
1784 /* Write list before version */
1785 smp_wmb();
1786 ram_list.version++;
1787 call_rcu(block, reclaim_ramblock, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001788 qemu_mutex_unlock_ramlist();
bellarde9a1ab12007-02-08 23:08:38 +00001789}
1790
Huang Yingcd19cfa2011-03-02 08:56:19 +01001791#ifndef _WIN32
1792void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1793{
1794 RAMBlock *block;
1795 ram_addr_t offset;
1796 int flags;
1797 void *area, *vaddr;
1798
Mike Day0dc3f442013-09-05 14:41:35 -04001799 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001800 offset = addr - block->offset;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001801 if (offset < block->max_length) {
Michael S. Tsirkin1240be22014-11-12 11:44:41 +02001802 vaddr = ramblock_ptr(block, offset);
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001803 if (block->flags & RAM_PREALLOC) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001804 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001805 } else if (xen_enabled()) {
1806 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001807 } else {
1808 flags = MAP_FIXED;
Markus Armbruster3435f392013-07-31 15:11:07 +02001809 if (block->fd >= 0) {
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001810 flags |= (block->flags & RAM_SHARED ?
1811 MAP_SHARED : MAP_PRIVATE);
Markus Armbruster3435f392013-07-31 15:11:07 +02001812 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1813 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001814 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001815 /*
1816 * Remap needs to match alloc. Accelerators that
1817 * set phys_mem_alloc never remap. If they did,
1818 * we'd need a remap hook here.
1819 */
1820 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1821
Huang Yingcd19cfa2011-03-02 08:56:19 +01001822 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1823 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1824 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001825 }
1826 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001827 fprintf(stderr, "Could not remap addr: "
1828 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001829 length, addr);
1830 exit(1);
1831 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001832 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001833 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001834 }
Huang Yingcd19cfa2011-03-02 08:56:19 +01001835 }
1836 }
1837}
1838#endif /* !_WIN32 */
1839
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001840int qemu_get_ram_fd(ram_addr_t addr)
1841{
Mike Dayae3a7042013-09-05 14:41:35 -04001842 RAMBlock *block;
1843 int fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001844
Mike Day0dc3f442013-09-05 14:41:35 -04001845 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001846 block = qemu_get_ram_block(addr);
1847 fd = block->fd;
Mike Day0dc3f442013-09-05 14:41:35 -04001848 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001849 return fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001850}
1851
Tetsuya Mukawa56a571d2015-12-21 12:47:34 +09001852void qemu_set_ram_fd(ram_addr_t addr, int fd)
1853{
1854 RAMBlock *block;
1855
1856 rcu_read_lock();
1857 block = qemu_get_ram_block(addr);
1858 block->fd = fd;
1859 rcu_read_unlock();
1860}
1861
Damjan Marion3fd74b82014-06-26 23:01:32 +02001862void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1863{
Mike Dayae3a7042013-09-05 14:41:35 -04001864 RAMBlock *block;
1865 void *ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001866
Mike Day0dc3f442013-09-05 14:41:35 -04001867 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001868 block = qemu_get_ram_block(addr);
1869 ptr = ramblock_ptr(block, 0);
Mike Day0dc3f442013-09-05 14:41:35 -04001870 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001871 return ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001872}
1873
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001874/* Return a host pointer to ram allocated with qemu_ram_alloc.
Mike Dayae3a7042013-09-05 14:41:35 -04001875 * This should not be used for general purpose DMA. Use address_space_map
1876 * or address_space_rw instead. For local memory (e.g. video ram) that the
1877 * device owns, use memory_region_get_ram_ptr.
Mike Day0dc3f442013-09-05 14:41:35 -04001878 *
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001879 * Called within RCU critical section.
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001880 */
Gonglei3655cb92016-02-20 10:35:20 +08001881void *qemu_get_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001882{
Gonglei3655cb92016-02-20 10:35:20 +08001883 RAMBlock *block = ram_block;
1884
1885 if (block == NULL) {
1886 block = qemu_get_ram_block(addr);
1887 }
Mike Dayae3a7042013-09-05 14:41:35 -04001888
1889 if (xen_enabled() && block->host == NULL) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001890 /* We need to check if the requested address is in the RAM
1891 * because we don't want to map the entire memory in QEMU.
1892 * In that case just map until the end of the page.
1893 */
1894 if (block->offset == 0) {
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001895 return xen_map_cache(addr, 0, 0);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001896 }
Mike Dayae3a7042013-09-05 14:41:35 -04001897
1898 block->host = xen_map_cache(block->offset, block->max_length, 1);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001899 }
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001900 return ramblock_ptr(block, addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00001901}
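/*
 * Usage sketch (not part of the original file): callers that are not
 * already inside an RCU critical section must bracket the lookup
 * themselves, since the returned pointer is only protected while the
 * section is held.  The address is an invented example value.
 */
#if 0
static uint8_t example_peek_byte(ram_addr_t addr)
{
    uint8_t val;

    rcu_read_lock();
    val = ldub_p(qemu_get_ram_ptr(NULL, addr));
    rcu_read_unlock();
    return val;
}
#endif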
1902
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001903/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
Mike Dayae3a7042013-09-05 14:41:35 -04001904 * but takes a size argument.
Mike Day0dc3f442013-09-05 14:41:35 -04001905 *
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001906 * Called within RCU critical section.
Mike Dayae3a7042013-09-05 14:41:35 -04001907 */
Gonglei3655cb92016-02-20 10:35:20 +08001908static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
1909 hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001910{
Gonglei3655cb92016-02-20 10:35:20 +08001911 RAMBlock *block = ram_block;
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001912 ram_addr_t offset_inside_block;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001913 if (*size == 0) {
1914 return NULL;
1915 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001916
Gonglei3655cb92016-02-20 10:35:20 +08001917 if (block == NULL) {
1918 block = qemu_get_ram_block(addr);
1919 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001920 offset_inside_block = addr - block->offset;
1921 *size = MIN(*size, block->max_length - offset_inside_block);
1922
1923 if (xen_enabled() && block->host == NULL) {
1924 /* We need to check if the requested address is in the RAM
1925 * because we don't want to map the entire memory in QEMU.
1926 * In that case just map the requested area.
1927 */
1928 if (block->offset == 0) {
1929 return xen_map_cache(addr, *size, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001930 }
1931
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001932 block->host = xen_map_cache(block->offset, block->max_length, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001933 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001934
1935 return ramblock_ptr(block, offset_inside_block);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001936}
1937
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001938/*
1939 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1940 * in that RAMBlock.
1941 *
1942 * ptr: Host pointer to look up
1943 * round_offset: If true round the result offset down to a page boundary
1944 * *ram_addr: set to result ram_addr
1945 * *offset: set to result offset within the RAMBlock
1946 *
1947 * Returns: RAMBlock (or NULL if not found)
Mike Dayae3a7042013-09-05 14:41:35 -04001948 *
1949 * By the time this function returns, the returned pointer is not protected
1950 * by RCU anymore. If the caller is not within an RCU critical section and
1951 * does not hold the iothread lock, it must have other means of protecting the
1952 * pointer, such as a reference to the region that includes the incoming
1953 * ram_addr_t.
1954 */
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001955RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
1956 ram_addr_t *ram_addr,
1957 ram_addr_t *offset)
pbrook5579c7f2009-04-11 14:47:08 +00001958{
pbrook94a6b542009-04-11 17:15:54 +00001959 RAMBlock *block;
1960 uint8_t *host = ptr;
1961
Jan Kiszka868bb332011-06-21 22:59:09 +02001962 if (xen_enabled()) {
Mike Day0dc3f442013-09-05 14:41:35 -04001963 rcu_read_lock();
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001964 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001965 block = qemu_get_ram_block(*ram_addr);
1966 if (block) {
1967 *offset = (host - block->host);
1968 }
Mike Day0dc3f442013-09-05 14:41:35 -04001969 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001970 return block;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001971 }
1972
Mike Day0dc3f442013-09-05 14:41:35 -04001973 rcu_read_lock();
1974 block = atomic_rcu_read(&ram_list.mru_block);
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001975 if (block && block->host && host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001976 goto found;
1977 }
1978
Mike Day0dc3f442013-09-05 14:41:35 -04001979 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001980 /* This case happens when the block is not mapped. */
1981 if (block->host == NULL) {
1982 continue;
1983 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001984 if (host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001985 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001986 }
pbrook94a6b542009-04-11 17:15:54 +00001987 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001988
Mike Day0dc3f442013-09-05 14:41:35 -04001989 rcu_read_unlock();
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001990 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001991
1992found:
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001993 *offset = (host - block->host);
1994 if (round_offset) {
1995 *offset &= TARGET_PAGE_MASK;
1996 }
1997 *ram_addr = block->offset + *offset;
Mike Day0dc3f442013-09-05 14:41:35 -04001998 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001999 return block;
2000}
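/*
 * Usage sketch (not part of the original file): translating a host pointer
 * that is believed to point into guest RAM back to its ram_addr_t.  A NULL
 * result means the pointer does not belong to any RAMBlock.
 */
#if 0
static bool example_host_to_ram_addr(void *host, ram_addr_t *ram_addr)
{
    ram_addr_t offset;

    return qemu_ram_block_from_host(host, true, ram_addr, &offset) != NULL;
}
#endif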
2001
Dr. David Alan Gilberte3dd7492015-11-05 18:10:33 +00002002/*
2003 * Finds the named RAMBlock
2004 *
2005 * name: The name of RAMBlock to find
2006 *
2007 * Returns: RAMBlock (or NULL if not found)
2008 */
2009RAMBlock *qemu_ram_block_by_name(const char *name)
2010{
2011 RAMBlock *block;
2012
2013 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
2014 if (!strcmp(name, block->idstr)) {
2015 return block;
2016 }
2017 }
2018
2019 return NULL;
2020}
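/*
 * Usage sketch (not part of the original file): looking up a block by the
 * name used in the migration stream.  "pc.ram" is only an example name;
 * the lookup is bracketed with rcu_read_lock() because it walks an
 * RCU-protected list.
 */
#if 0
static void example_lookup_by_name(void)
{
    RAMBlock *rb;

    rcu_read_lock();
    rb = qemu_ram_block_by_name("pc.ram");
    if (rb) {
        printf("found RAMBlock %s\n", qemu_ram_get_idstr(rb));
    }
    rcu_read_unlock();
}
#endif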
2021
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00002022/* Some of the softmmu routines need to translate from a host pointer
2023 (typically a TLB entry) back to a ram offset. */
2024MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
2025{
2026 RAMBlock *block;
2027 ram_addr_t offset; /* Not used */
2028
2029 block = qemu_ram_block_from_host(ptr, false, ram_addr, &offset);
2030
2031 if (!block) {
2032 return NULL;
2033 }
2034
2035 return block->mr;
Marcelo Tosattie8902612010-10-11 15:31:19 -03002036}
Alex Williamsonf471a172010-06-11 11:11:42 -06002037
Paolo Bonzini49b24af2015-12-16 10:30:47 +01002038/* Called within RCU critical section. */
Avi Kivitya8170e52012-10-23 12:30:10 +02002039static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002040 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00002041{
Juan Quintela52159192013-10-08 12:44:04 +02002042 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002043 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00002044 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002045 switch (size) {
2046 case 1:
Gonglei3655cb92016-02-20 10:35:20 +08002047 stb_p(qemu_get_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002048 break;
2049 case 2:
Gonglei3655cb92016-02-20 10:35:20 +08002050 stw_p(qemu_get_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002051 break;
2052 case 4:
Gonglei3655cb92016-02-20 10:35:20 +08002053 stl_p(qemu_get_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002054 break;
2055 default:
2056 abort();
2057 }
Paolo Bonzini58d27072015-03-23 11:56:01 +01002058 /* Set both VGA and migration bits for simplicity and to remove
2059 * the notdirty callback faster.
2060 */
2061 cpu_physical_memory_set_dirty_range(ram_addr, size,
2062 DIRTY_CLIENTS_NOCODE);
bellardf23db162005-08-21 19:12:28 +00002063 /* we remove the notdirty callback only if the code has been
2064 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02002065 if (!cpu_physical_memory_is_clean(ram_addr)) {
Peter Crosthwaitebcae01e2015-09-10 22:39:42 -07002066 tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
Andreas Färber4917cf42013-05-27 05:17:50 +02002067 }
bellard1ccde1c2004-02-06 19:46:14 +00002068}
2069
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002070static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
2071 unsigned size, bool is_write)
2072{
2073 return is_write;
2074}
2075
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002076static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002077 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002078 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002079 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00002080};
2081
pbrook0f459d12008-06-09 00:20:13 +00002082/* Generate a debug exception if a watchpoint has been hit. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002083static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
pbrook0f459d12008-06-09 00:20:13 +00002084{
Andreas Färber93afead2013-08-26 03:41:01 +02002085 CPUState *cpu = current_cpu;
Sergey Fedorov568496c2016-02-11 11:17:32 +00002086 CPUClass *cc = CPU_GET_CLASS(cpu);
Andreas Färber93afead2013-08-26 03:41:01 +02002087 CPUArchState *env = cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00002088 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00002089 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00002090 CPUWatchpoint *wp;
Emilio G. Cota89fee742016-04-07 13:19:22 -04002091 uint32_t cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00002092
Andreas Färberff4700b2013-08-26 18:23:18 +02002093 if (cpu->watchpoint_hit) {
aliguori06d55cc2008-11-18 20:24:06 +00002094 /* We re-entered the check after replacing the TB. Now raise
2095 * the debug interrupt so that it will trigger after the
2096 * current instruction. */
Andreas Färber93afead2013-08-26 03:41:01 +02002097 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00002098 return;
2099 }
Andreas Färber93afead2013-08-26 03:41:01 +02002100 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Andreas Färberff4700b2013-08-26 18:23:18 +02002101 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Peter Maydell05068c02014-09-12 14:06:48 +01002102 if (cpu_watchpoint_address_matches(wp, vaddr, len)
2103 && (wp->flags & flags)) {
Peter Maydell08225672014-09-12 14:06:48 +01002104 if (flags == BP_MEM_READ) {
2105 wp->flags |= BP_WATCHPOINT_HIT_READ;
2106 } else {
2107 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
2108 }
2109 wp->hitaddr = vaddr;
Peter Maydell66b9b432015-04-26 16:49:24 +01002110 wp->hitattrs = attrs;
Andreas Färberff4700b2013-08-26 18:23:18 +02002111 if (!cpu->watchpoint_hit) {
Sergey Fedorov568496c2016-02-11 11:17:32 +00002112 if (wp->flags & BP_CPU &&
2113 !cc->debug_check_watchpoint(cpu, wp)) {
2114 wp->flags &= ~BP_WATCHPOINT_HIT;
2115 continue;
2116 }
Andreas Färberff4700b2013-08-26 18:23:18 +02002117 cpu->watchpoint_hit = wp;
Andreas Färber239c51a2013-09-01 17:12:23 +02002118 tb_check_watchpoint(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002119 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
Andreas Färber27103422013-08-26 08:31:06 +02002120 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +02002121 cpu_loop_exit(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002122 } else {
2123 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
Andreas Färber648f0342013-09-01 17:43:17 +02002124 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
Andreas Färber0ea8cb82013-09-03 02:12:23 +02002125 cpu_resume_from_signal(cpu, NULL);
aliguori6e140f22008-11-18 20:37:55 +00002126 }
aliguori06d55cc2008-11-18 20:24:06 +00002127 }
aliguori6e140f22008-11-18 20:37:55 +00002128 } else {
2129 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00002130 }
2131 }
2132}
2133
pbrook6658ffb2007-03-16 23:58:11 +00002134/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2135 so these check for a hit then pass through to the normal out-of-line
2136 phys routines. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002137static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
2138 unsigned size, MemTxAttrs attrs)
pbrook6658ffb2007-03-16 23:58:11 +00002139{
Peter Maydell66b9b432015-04-26 16:49:24 +01002140 MemTxResult res;
2141 uint64_t data;
Peter Maydell79ed0412016-01-21 14:15:06 +00002142 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2143 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
pbrook6658ffb2007-03-16 23:58:11 +00002144
Peter Maydell66b9b432015-04-26 16:49:24 +01002145 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
Avi Kivity1ec9b902012-01-02 12:47:48 +02002146 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04002147 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002148 data = address_space_ldub(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002149 break;
2150 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002151 data = address_space_lduw(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002152 break;
2153 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002154 data = address_space_ldl(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002155 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02002156 default: abort();
2157 }
Peter Maydell66b9b432015-04-26 16:49:24 +01002158 *pdata = data;
2159 return res;
2160}
2161
2162static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
2163 uint64_t val, unsigned size,
2164 MemTxAttrs attrs)
2165{
2166 MemTxResult res;
Peter Maydell79ed0412016-01-21 14:15:06 +00002167 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2168 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
Peter Maydell66b9b432015-04-26 16:49:24 +01002169
2170 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
2171 switch (size) {
2172 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002173 address_space_stb(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002174 break;
2175 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002176 address_space_stw(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002177 break;
2178 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002179 address_space_stl(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002180 break;
2181 default: abort();
2182 }
2183 return res;
pbrook6658ffb2007-03-16 23:58:11 +00002184}
2185
Avi Kivity1ec9b902012-01-02 12:47:48 +02002186static const MemoryRegionOps watch_mem_ops = {
Peter Maydell66b9b432015-04-26 16:49:24 +01002187 .read_with_attrs = watch_mem_read,
2188 .write_with_attrs = watch_mem_write,
Avi Kivity1ec9b902012-01-02 12:47:48 +02002189 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00002190};
pbrook6658ffb2007-03-16 23:58:11 +00002191
Peter Maydellf25a49e2015-04-26 16:49:24 +01002192static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2193 unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002194{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002195 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002196 uint8_t buf[8];
Peter Maydell5c9eb022015-04-26 16:49:24 +01002197 MemTxResult res;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002198
blueswir1db7b5422007-05-26 17:36:03 +00002199#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002200 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002201 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00002202#endif
Peter Maydell5c9eb022015-04-26 16:49:24 +01002203 res = address_space_read(subpage->as, addr + subpage->base,
2204 attrs, buf, len);
2205 if (res) {
2206 return res;
Peter Maydellf25a49e2015-04-26 16:49:24 +01002207 }
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002208 switch (len) {
2209 case 1:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002210 *data = ldub_p(buf);
2211 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002212 case 2:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002213 *data = lduw_p(buf);
2214 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002215 case 4:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002216 *data = ldl_p(buf);
2217 return MEMTX_OK;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002218 case 8:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002219 *data = ldq_p(buf);
2220 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002221 default:
2222 abort();
2223 }
blueswir1db7b5422007-05-26 17:36:03 +00002224}
2225
Peter Maydellf25a49e2015-04-26 16:49:24 +01002226static MemTxResult subpage_write(void *opaque, hwaddr addr,
2227 uint64_t value, unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002228{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002229 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002230 uint8_t buf[8];
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002231
blueswir1db7b5422007-05-26 17:36:03 +00002232#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002233 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002234 " value %"PRIx64"\n",
2235 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00002236#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002237 switch (len) {
2238 case 1:
2239 stb_p(buf, value);
2240 break;
2241 case 2:
2242 stw_p(buf, value);
2243 break;
2244 case 4:
2245 stl_p(buf, value);
2246 break;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002247 case 8:
2248 stq_p(buf, value);
2249 break;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002250 default:
2251 abort();
2252 }
Peter Maydell5c9eb022015-04-26 16:49:24 +01002253 return address_space_write(subpage->as, addr + subpage->base,
2254 attrs, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00002255}
2256
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002257static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08002258 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002259{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002260 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002261#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002262 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002263 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002264#endif
2265
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002266 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08002267 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002268}
2269
Avi Kivity70c68e42012-01-02 12:32:48 +02002270static const MemoryRegionOps subpage_ops = {
Peter Maydellf25a49e2015-04-26 16:49:24 +01002271 .read_with_attrs = subpage_read,
2272 .write_with_attrs = subpage_write,
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002273 .impl.min_access_size = 1,
2274 .impl.max_access_size = 8,
2275 .valid.min_access_size = 1,
2276 .valid.max_access_size = 8,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002277 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02002278 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00002279};
2280
Anthony Liguoric227f092009-10-01 16:12:16 -05002281static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002282 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00002283{
2284 int idx, eidx;
2285
2286 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2287 return -1;
2288 idx = SUBPAGE_IDX(start);
2289 eidx = SUBPAGE_IDX(end);
2290#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002291 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2292 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00002293#endif
blueswir1db7b5422007-05-26 17:36:03 +00002294 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02002295 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00002296 }
2297
2298 return 0;
2299}
2300
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002301static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00002302{
Anthony Liguoric227f092009-10-01 16:12:16 -05002303 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00002304
Anthony Liguori7267c092011-08-20 22:09:37 -05002305 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00002306
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002307 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00002308 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002309 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002310 NULL, TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02002311 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00002312#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002313 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2314 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00002315#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02002316 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00002317
2318 return mmio;
2319}
2320
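/*
 * Illustrative sketch, not part of the original file: the subpage machinery
 * above exists so that MMIO regions smaller than TARGET_PAGE_SIZE (or not
 * page aligned) can share a page.  'example_ops', the opaque pointer and the
 * offsets below are hypothetical.
 */
static void example_map_small_region(MemoryRegion *parent, void *opaque,
                                     const MemoryRegionOps *example_ops)
{
    MemoryRegion *small = g_malloc0(sizeof(*small));

    /* 16 bytes of registers at offset 0x100 inside a page: accesses to the
     * rest of that page are routed through subpage_read()/subpage_write(). */
    memory_region_init_io(small, NULL, example_ops, opaque, "example-regs", 16);
    memory_region_add_subregion(parent, 0x100, small);
}
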
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002321static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2322 MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02002323{
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002324 assert(as);
Avi Kivity5312bd82012-02-12 18:32:55 +02002325 MemoryRegionSection section = {
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002326 .address_space = as,
Avi Kivity5312bd82012-02-12 18:32:55 +02002327 .mr = mr,
2328 .offset_within_address_space = 0,
2329 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002330 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02002331 };
2332
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002333 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02002334}
2335
Peter Maydella54c87b2016-01-21 14:15:05 +00002336MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
Avi Kivityaa102232012-03-08 17:06:55 +02002337{
Peter Maydella54c87b2016-01-21 14:15:05 +00002338 int asidx = cpu_asidx_from_attrs(cpu, attrs);
2339 CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
Peter Maydell32857f42015-10-01 15:29:50 +01002340 AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002341 MemoryRegionSection *sections = d->map.sections;
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002342
2343 return sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02002344}
2345
Avi Kivitye9179ce2009-06-14 11:38:52 +03002346static void io_mem_init(void)
2347{
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002348 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002349 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002350 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002351 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002352 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002353 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002354 NULL, UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03002355}
2356
Avi Kivityac1970f2012-10-03 16:22:53 +02002357static void mem_begin(MemoryListener *listener)
2358{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002359 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002360 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2361 uint16_t n;
2362
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002363 n = dummy_section(&d->map, as, &io_mem_unassigned);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002364 assert(n == PHYS_SECTION_UNASSIGNED);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002365 n = dummy_section(&d->map, as, &io_mem_notdirty);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002366 assert(n == PHYS_SECTION_NOTDIRTY);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002367 n = dummy_section(&d->map, as, &io_mem_rom);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002368 assert(n == PHYS_SECTION_ROM);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002369 n = dummy_section(&d->map, as, &io_mem_watch);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002370 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02002371
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02002372 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02002373 d->as = as;
2374 as->next_dispatch = d;
2375}
2376
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002377static void address_space_dispatch_free(AddressSpaceDispatch *d)
2378{
2379 phys_sections_free(&d->map);
2380 g_free(d);
2381}
2382
Paolo Bonzini00752702013-05-29 12:13:54 +02002383static void mem_commit(MemoryListener *listener)
2384{
2385 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02002386 AddressSpaceDispatch *cur = as->dispatch;
2387 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02002388
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002389 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02002390
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002391 atomic_rcu_set(&as->dispatch, next);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002392 if (cur) {
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002393 call_rcu(cur, address_space_dispatch_free, rcu);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002394 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02002395}
2396
Avi Kivity1d711482012-10-02 18:54:45 +02002397static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02002398{
Peter Maydell32857f42015-10-01 15:29:50 +01002399 CPUAddressSpace *cpuas;
2400 AddressSpaceDispatch *d;
Avi Kivity117712c2012-02-12 21:23:17 +02002401
2402 /* since each CPU stores ram addresses in its TLB cache, we must
2403 reset the modified entries */
Peter Maydell32857f42015-10-01 15:29:50 +01002404 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
2405 cpu_reloading_memory_map();
2406 /* The CPU and TLB are protected by the iothread lock.
2407 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2408 * may have split the RCU critical section.
2409 */
2410 d = atomic_rcu_read(&cpuas->as->dispatch);
2411 cpuas->memory_dispatch = d;
2412 tlb_flush(cpuas->cpu, 1);
Avi Kivity50c1e142012-02-08 21:36:02 +02002413}
2414
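/*
 * Sketch of the RCU pattern used here, with a hypothetical helper:
 * mem_commit() publishes the new dispatch with atomic_rcu_set() and reclaims
 * the old one with call_rcu(); a reader dereferences the pointer with
 * atomic_rcu_read() inside an RCU read-side critical section (tcg_commit()
 * can rely on the iothread lock instead).
 */
static unsigned example_dispatch_nodes_nb(AddressSpace *as)
{
    AddressSpaceDispatch *d;
    unsigned nb;

    rcu_read_lock();
    d = atomic_rcu_read(&as->dispatch);
    nb = d ? d->map.nodes_nb : 0;
    rcu_read_unlock();
    return nb;
}
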
Avi Kivityac1970f2012-10-03 16:22:53 +02002415void address_space_init_dispatch(AddressSpace *as)
2416{
Paolo Bonzini00752702013-05-29 12:13:54 +02002417 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002418 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002419 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02002420 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02002421 .region_add = mem_add,
2422 .region_nop = mem_add,
2423 .priority = 0,
2424 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002425 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02002426}
2427
Paolo Bonzini6e48e8f2015-02-10 10:25:44 -07002428void address_space_unregister(AddressSpace *as)
2429{
2430 memory_listener_unregister(&as->dispatch_listener);
2431}
2432
Avi Kivity83f3c252012-10-07 12:59:55 +02002433void address_space_destroy_dispatch(AddressSpace *as)
2434{
2435 AddressSpaceDispatch *d = as->dispatch;
2436
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002437 atomic_rcu_set(&as->dispatch, NULL);
2438 if (d) {
2439 call_rcu(d, address_space_dispatch_free, rcu);
2440 }
Avi Kivity83f3c252012-10-07 12:59:55 +02002441}
2442
Avi Kivity62152b82011-07-26 14:26:14 +03002443static void memory_map_init(void)
2444{
Anthony Liguori7267c092011-08-20 22:09:37 -05002445 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01002446
Paolo Bonzini57271d62013-11-07 17:14:37 +01002447 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002448 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03002449
Anthony Liguori7267c092011-08-20 22:09:37 -05002450 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02002451 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2452 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002453 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity62152b82011-07-26 14:26:14 +03002454}
2455
2456MemoryRegion *get_system_memory(void)
2457{
2458 return system_memory;
2459}
2460
Avi Kivity309cb472011-08-08 16:09:03 +03002461MemoryRegion *get_system_io(void)
2462{
2463 return system_io;
2464}
2465
pbrooke2eef172008-06-08 01:09:01 +00002466#endif /* !defined(CONFIG_USER_ONLY) */
2467
bellard13eb76e2004-01-24 15:23:36 +00002468/* physical memory access (slow version, mainly for debug) */
2469#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02002470int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00002471 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002472{
2473 int l, flags;
2474 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00002475 void * p;
bellard13eb76e2004-01-24 15:23:36 +00002476
2477 while (len > 0) {
2478 page = addr & TARGET_PAGE_MASK;
2479 l = (page + TARGET_PAGE_SIZE) - addr;
2480 if (l > len)
2481 l = len;
2482 flags = page_get_flags(page);
2483 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00002484 return -1;
bellard13eb76e2004-01-24 15:23:36 +00002485 if (is_write) {
2486 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00002487 return -1;
bellard579a97f2007-11-11 14:26:47 +00002488 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002489 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00002490 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002491 memcpy(p, buf, l);
2492 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00002493 } else {
2494 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00002495 return -1;
bellard579a97f2007-11-11 14:26:47 +00002496 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002497 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00002498 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002499 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00002500 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00002501 }
2502 len -= l;
2503 buf += l;
2504 addr += l;
2505 }
Paul Brooka68fe892010-03-01 00:08:59 +00002506 return 0;
bellard13eb76e2004-01-24 15:23:36 +00002507}
bellard8df1cd02005-01-28 22:37:22 +00002508
bellard13eb76e2004-01-24 15:23:36 +00002509#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002510
Paolo Bonzini845b6212015-03-23 11:45:53 +01002511static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002512 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002513{
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002514 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2515 /* No early return if dirty_log_mask is or becomes 0, because
2516 * cpu_physical_memory_set_dirty_range will still call
2517 * xen_modified_memory.
2518 */
2519 if (dirty_log_mask) {
2520 dirty_log_mask =
2521 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002522 }
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002523 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2524 tb_invalidate_phys_range(addr, addr + length);
2525 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2526 }
2527 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002528}
2529
Richard Henderson23326162013-07-08 14:55:59 -07002530static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02002531{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02002532 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07002533
2534 /* Regions are assumed to support 1-4 byte accesses unless
2535 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07002536 if (access_size_max == 0) {
2537 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002538 }
Richard Henderson23326162013-07-08 14:55:59 -07002539
2540 /* Bound the maximum access by the alignment of the address. */
2541 if (!mr->ops->impl.unaligned) {
2542 unsigned align_size_max = addr & -addr;
2543 if (align_size_max != 0 && align_size_max < access_size_max) {
2544 access_size_max = align_size_max;
2545 }
2546 }
2547
2548 /* Don't attempt accesses larger than the maximum. */
2549 if (l > access_size_max) {
2550 l = access_size_max;
2551 }
Peter Maydell6554f5c2015-07-24 13:33:10 +01002552 l = pow2floor(l);
Richard Henderson23326162013-07-08 14:55:59 -07002553
2554 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002555}
2556
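/*
 * Worked example (added for clarity): with valid.max_access_size = 4, no
 * impl.unaligned support and an 8-byte access at address 0x1004, the region
 * limit first bounds the access to 4 bytes; the alignment bound
 * (0x1004 & -0x1004 == 4) does not shrink it further, so the dispatch loops
 * below carry out the 8-byte transfer as two 4-byte accesses.
 */
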
Jan Kiszka4840f102015-06-18 18:47:22 +02002557static bool prepare_mmio_access(MemoryRegion *mr)
Paolo Bonzini125b3802015-06-18 18:47:21 +02002558{
Jan Kiszka4840f102015-06-18 18:47:22 +02002559 bool unlocked = !qemu_mutex_iothread_locked();
2560 bool release_lock = false;
2561
2562 if (unlocked && mr->global_locking) {
2563 qemu_mutex_lock_iothread();
2564 unlocked = false;
2565 release_lock = true;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002566 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002567 if (mr->flush_coalesced_mmio) {
2568 if (unlocked) {
2569 qemu_mutex_lock_iothread();
2570 }
2571 qemu_flush_coalesced_mmio_buffer();
2572 if (unlocked) {
2573 qemu_mutex_unlock_iothread();
2574 }
2575 }
2576
2577 return release_lock;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002578}
2579
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002580/* Called within RCU critical section. */
2581static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
2582 MemTxAttrs attrs,
2583 const uint8_t *buf,
2584 int len, hwaddr addr1,
2585 hwaddr l, MemoryRegion *mr)
bellard13eb76e2004-01-24 15:23:36 +00002586{
bellard13eb76e2004-01-24 15:23:36 +00002587 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002588 uint64_t val;
Peter Maydell3b643492015-04-26 16:49:23 +01002589 MemTxResult result = MEMTX_OK;
Jan Kiszka4840f102015-06-18 18:47:22 +02002590 bool release_lock = false;
ths3b46e622007-09-17 08:09:54 +00002591
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002592 for (;;) {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002593 if (!memory_access_is_direct(mr, true)) {
2594 release_lock |= prepare_mmio_access(mr);
2595 l = memory_access_size(mr, l, addr1);
2596 /* XXX: could force current_cpu to NULL to avoid
2597 potential bugs */
2598 switch (l) {
2599 case 8:
2600 /* 64 bit write access */
2601 val = ldq_p(buf);
2602 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2603 attrs);
2604 break;
2605 case 4:
2606 /* 32 bit write access */
2607 val = ldl_p(buf);
2608 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2609 attrs);
2610 break;
2611 case 2:
2612 /* 16 bit write access */
2613 val = lduw_p(buf);
2614 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2615 attrs);
2616 break;
2617 case 1:
2618 /* 8 bit write access */
2619 val = ldub_p(buf);
2620 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2621 attrs);
2622 break;
2623 default:
2624 abort();
bellard13eb76e2004-01-24 15:23:36 +00002625 }
2626 } else {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002627 addr1 += memory_region_get_ram_addr(mr);
2628 /* RAM case */
Gonglei3655cb92016-02-20 10:35:20 +08002629 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002630 memcpy(ptr, buf, l);
2631 invalidate_and_set_dirty(mr, addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002632 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002633
2634 if (release_lock) {
2635 qemu_mutex_unlock_iothread();
2636 release_lock = false;
2637 }
2638
bellard13eb76e2004-01-24 15:23:36 +00002639 len -= l;
2640 buf += l;
2641 addr += l;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002642
2643 if (!len) {
2644 break;
2645 }
2646
2647 l = len;
2648 mr = address_space_translate(as, addr, &addr1, &l, true);
bellard13eb76e2004-01-24 15:23:36 +00002649 }
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002650
Peter Maydell3b643492015-04-26 16:49:23 +01002651 return result;
bellard13eb76e2004-01-24 15:23:36 +00002652}
bellard8df1cd02005-01-28 22:37:22 +00002653
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002654MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2655 const uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002656{
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002657 hwaddr l;
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002658 hwaddr addr1;
2659 MemoryRegion *mr;
2660 MemTxResult result = MEMTX_OK;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002661
2662 if (len > 0) {
2663 rcu_read_lock();
2664 l = len;
2665 mr = address_space_translate(as, addr, &addr1, &l, true);
2666 result = address_space_write_continue(as, addr, attrs, buf, len,
2667 addr1, l, mr);
2668 rcu_read_unlock();
2669 }
2670
2671 return result;
2672}
2673
2674/* Called within RCU critical section. */
2675MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
2676 MemTxAttrs attrs, uint8_t *buf,
2677 int len, hwaddr addr1, hwaddr l,
2678 MemoryRegion *mr)
2679{
2680 uint8_t *ptr;
2681 uint64_t val;
2682 MemTxResult result = MEMTX_OK;
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002683 bool release_lock = false;
2684
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002685 for (;;) {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002686 if (!memory_access_is_direct(mr, false)) {
2687 /* I/O case */
2688 release_lock |= prepare_mmio_access(mr);
2689 l = memory_access_size(mr, l, addr1);
2690 switch (l) {
2691 case 8:
2692 /* 64 bit read access */
2693 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2694 attrs);
2695 stq_p(buf, val);
2696 break;
2697 case 4:
2698 /* 32 bit read access */
2699 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2700 attrs);
2701 stl_p(buf, val);
2702 break;
2703 case 2:
2704 /* 16 bit read access */
2705 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2706 attrs);
2707 stw_p(buf, val);
2708 break;
2709 case 1:
2710 /* 8 bit read access */
2711 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2712 attrs);
2713 stb_p(buf, val);
2714 break;
2715 default:
2716 abort();
2717 }
2718 } else {
2719 /* RAM case */
Fam Zheng8e41fb62016-03-01 14:18:21 +08002720 ptr = qemu_get_ram_ptr(mr->ram_block,
2721 memory_region_get_ram_addr(mr) + addr1);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002722 memcpy(buf, ptr, l);
2723 }
2724
2725 if (release_lock) {
2726 qemu_mutex_unlock_iothread();
2727 release_lock = false;
2728 }
2729
2730 len -= l;
2731 buf += l;
2732 addr += l;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002733
2734 if (!len) {
2735 break;
2736 }
2737
2738 l = len;
2739 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002740 }
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002741
2742 return result;
2743}
2744
Paolo Bonzini3cc8f882015-12-09 10:34:13 +01002745MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
2746 MemTxAttrs attrs, uint8_t *buf, int len)
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002747{
2748 hwaddr l;
2749 hwaddr addr1;
2750 MemoryRegion *mr;
2751 MemTxResult result = MEMTX_OK;
2752
2753 if (len > 0) {
2754 rcu_read_lock();
2755 l = len;
2756 mr = address_space_translate(as, addr, &addr1, &l, false);
2757 result = address_space_read_continue(as, addr, attrs, buf, len,
2758 addr1, l, mr);
2759 rcu_read_unlock();
2760 }
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002761
2762 return result;
Avi Kivityac1970f2012-10-03 16:22:53 +02002763}
2764
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002765MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2766 uint8_t *buf, int len, bool is_write)
2767{
2768 if (is_write) {
2769 return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
2770 } else {
2771 return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
2772 }
2773}
Avi Kivityac1970f2012-10-03 16:22:53 +02002774
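/*
 * Illustrative sketch, not part of the original file: a guest-physical write
 * followed by a read-back, with the accumulated MemTxResult checked.  The
 * helper name, the address and the test pattern are hypothetical.
 */
static bool example_poke_and_verify(AddressSpace *as, hwaddr addr)
{
    uint8_t out[4] = { 0xde, 0xad, 0xbe, 0xef };
    uint8_t in[4];
    MemTxResult res;

    res = address_space_write(as, addr, MEMTXATTRS_UNSPECIFIED,
                              out, sizeof(out));
    res |= address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                              in, sizeof(in));
    return res == MEMTX_OK && memcmp(in, out, sizeof(in)) == 0;
}
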
Avi Kivitya8170e52012-10-23 12:30:10 +02002775void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002776 int len, int is_write)
2777{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002778 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2779 buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002780}
2781
Alexander Graf582b55a2013-12-11 14:17:44 +01002782enum write_rom_type {
2783 WRITE_DATA,
2784 FLUSH_CACHE,
2785};
2786
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002787static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
Alexander Graf582b55a2013-12-11 14:17:44 +01002788 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
bellardd0ecd2a2006-04-23 17:14:48 +00002789{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002790 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002791 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002792 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002793 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002794
Paolo Bonzini41063e12015-03-18 14:21:43 +01002795 rcu_read_lock();
bellardd0ecd2a2006-04-23 17:14:48 +00002796 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002797 l = len;
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002798 mr = address_space_translate(as, addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002799
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002800 if (!(memory_region_is_ram(mr) ||
2801 memory_region_is_romd(mr))) {
Paolo Bonzinib242e0e2015-07-04 00:24:51 +02002802 l = memory_access_size(mr, l, addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002803 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002804 addr1 += memory_region_get_ram_addr(mr);
bellardd0ecd2a2006-04-23 17:14:48 +00002805 /* ROM/RAM case */
Gonglei3655cb92016-02-20 10:35:20 +08002806 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
Alexander Graf582b55a2013-12-11 14:17:44 +01002807 switch (type) {
2808 case WRITE_DATA:
2809 memcpy(ptr, buf, l);
Paolo Bonzini845b6212015-03-23 11:45:53 +01002810 invalidate_and_set_dirty(mr, addr1, l);
Alexander Graf582b55a2013-12-11 14:17:44 +01002811 break;
2812 case FLUSH_CACHE:
2813 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2814 break;
2815 }
bellardd0ecd2a2006-04-23 17:14:48 +00002816 }
2817 len -= l;
2818 buf += l;
2819 addr += l;
2820 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002821 rcu_read_unlock();
bellardd0ecd2a2006-04-23 17:14:48 +00002822}
2823
Alexander Graf582b55a2013-12-11 14:17:44 +01002824/* Used for ROM loading: can write to both RAM and ROM. */
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002825void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
Alexander Graf582b55a2013-12-11 14:17:44 +01002826 const uint8_t *buf, int len)
2827{
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002828 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
Alexander Graf582b55a2013-12-11 14:17:44 +01002829}
2830
2831void cpu_flush_icache_range(hwaddr start, int len)
2832{
2833 /*
2834 * This function should do the same thing as an icache flush that was
2835 * triggered from within the guest. For TCG we are always cache coherent,
2836 * so there is no need to flush anything. For KVM / Xen we need to flush
2837 * the host's instruction cache at least.
2838 */
2839 if (tcg_enabled()) {
2840 return;
2841 }
2842
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002843 cpu_physical_memory_write_rom_internal(&address_space_memory,
2844 start, NULL, len, FLUSH_CACHE);
Alexander Graf582b55a2013-12-11 14:17:44 +01002845}
2846
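/*
 * Sketch of a typical caller, with hypothetical 'blob'/'blob_size': a loader
 * places code into guest memory with cpu_physical_memory_write_rom() and then
 * flushes the host instruction cache, which only matters for KVM/Xen.
 */
static void example_load_boot_blob(AddressSpace *as, hwaddr dest,
                                   const uint8_t *blob, int blob_size)
{
    cpu_physical_memory_write_rom(as, dest, blob, blob_size);
    cpu_flush_icache_range(dest, blob_size);
}
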
aliguori6d16c2f2009-01-22 16:59:11 +00002847typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002848 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002849 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002850 hwaddr addr;
2851 hwaddr len;
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002852 bool in_use;
aliguori6d16c2f2009-01-22 16:59:11 +00002853} BounceBuffer;
2854
2855static BounceBuffer bounce;
2856
aliguoriba223c22009-01-22 16:59:16 +00002857typedef struct MapClient {
Fam Zhenge95205e2015-03-16 17:03:37 +08002858 QEMUBH *bh;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002859 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002860} MapClient;
2861
Fam Zheng38e047b2015-03-16 17:03:35 +08002862QemuMutex map_client_list_lock;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002863static QLIST_HEAD(map_client_list, MapClient) map_client_list
2864 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002865
Fam Zhenge95205e2015-03-16 17:03:37 +08002866static void cpu_unregister_map_client_do(MapClient *client)
aliguoriba223c22009-01-22 16:59:16 +00002867{
Blue Swirl72cf2d42009-09-12 07:36:22 +00002868 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002869 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002870}
2871
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002872static void cpu_notify_map_clients_locked(void)
aliguoriba223c22009-01-22 16:59:16 +00002873{
2874 MapClient *client;
2875
Blue Swirl72cf2d42009-09-12 07:36:22 +00002876 while (!QLIST_EMPTY(&map_client_list)) {
2877 client = QLIST_FIRST(&map_client_list);
Fam Zhenge95205e2015-03-16 17:03:37 +08002878 qemu_bh_schedule(client->bh);
2879 cpu_unregister_map_client_do(client);
aliguoriba223c22009-01-22 16:59:16 +00002880 }
2881}
2882
Fam Zhenge95205e2015-03-16 17:03:37 +08002883void cpu_register_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002884{
2885 MapClient *client = g_malloc(sizeof(*client));
2886
Fam Zheng38e047b2015-03-16 17:03:35 +08002887 qemu_mutex_lock(&map_client_list_lock);
Fam Zhenge95205e2015-03-16 17:03:37 +08002888 client->bh = bh;
bellardd0ecd2a2006-04-23 17:14:48 +00002889 QLIST_INSERT_HEAD(&map_client_list, client, link);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002890 if (!atomic_read(&bounce.in_use)) {
2891 cpu_notify_map_clients_locked();
2892 }
Fam Zheng38e047b2015-03-16 17:03:35 +08002893 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002894}
2895
Fam Zheng38e047b2015-03-16 17:03:35 +08002896void cpu_exec_init_all(void)
2897{
2898 qemu_mutex_init(&ram_list.mutex);
Fam Zheng38e047b2015-03-16 17:03:35 +08002899 io_mem_init();
Paolo Bonzini680a4782015-11-02 09:23:52 +01002900 memory_map_init();
Fam Zheng38e047b2015-03-16 17:03:35 +08002901 qemu_mutex_init(&map_client_list_lock);
2902}
2903
Fam Zhenge95205e2015-03-16 17:03:37 +08002904void cpu_unregister_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002905{
Fam Zhenge95205e2015-03-16 17:03:37 +08002906 MapClient *client;
bellardd0ecd2a2006-04-23 17:14:48 +00002907
Fam Zhenge95205e2015-03-16 17:03:37 +08002908 qemu_mutex_lock(&map_client_list_lock);
2909 QLIST_FOREACH(client, &map_client_list, link) {
2910 if (client->bh == bh) {
2911 cpu_unregister_map_client_do(client);
2912 break;
2913 }
2914 }
2915 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002916}
2917
2918static void cpu_notify_map_clients(void)
2919{
Fam Zheng38e047b2015-03-16 17:03:35 +08002920 qemu_mutex_lock(&map_client_list_lock);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002921 cpu_notify_map_clients_locked();
Fam Zheng38e047b2015-03-16 17:03:35 +08002922 qemu_mutex_unlock(&map_client_list_lock);
aliguori6d16c2f2009-01-22 16:59:11 +00002923}
2924
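/*
 * Sketch of the intended use of the map-client API; 'ExampleDMA' and
 * example_dma_retry() are hypothetical, and dma->bh is assumed to have been
 * created with qemu_bh_new(example_dma_retry, dma).  When the single bounce
 * buffer is busy, address_space_map() returns NULL; the caller registers its
 * bottom half and is rescheduled from cpu_notify_map_clients() once
 * address_space_unmap() releases the buffer.
 */
typedef struct ExampleDMA {
    AddressSpace *as;
    hwaddr addr;
    hwaddr len;
    QEMUBH *bh;
} ExampleDMA;

static void example_dma_retry(void *opaque)
{
    ExampleDMA *dma = opaque;
    hwaddr plen = dma->len;
    void *mem = address_space_map(dma->as, dma->addr, &plen, true);

    if (!mem) {
        /* Still contended: wait for the next notification. */
        cpu_register_map_client(dma->bh);
        return;
    }
    /* ... fill 'mem' with up to 'plen' bytes here ... */
    address_space_unmap(dma->as, mem, plen, true, plen);
}
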
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002925bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2926{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002927 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002928 hwaddr l, xlat;
2929
Paolo Bonzini41063e12015-03-18 14:21:43 +01002930 rcu_read_lock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002931 while (len > 0) {
2932 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002933 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2934 if (!memory_access_is_direct(mr, is_write)) {
2935 l = memory_access_size(mr, l, addr);
2936 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002937 return false;
2938 }
2939 }
2940
2941 len -= l;
2942 addr += l;
2943 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002944 rcu_read_unlock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002945 return true;
2946}
2947
aliguori6d16c2f2009-01-22 16:59:11 +00002948/* Map a physical memory region into a host virtual address.
2949 * May map a subset of the requested range, given by and returned in *plen.
2950 * May return NULL if resources needed to perform the mapping are exhausted.
2951 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002952 * Use cpu_register_map_client() to know when retrying the map operation is
2953 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002954 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002955void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002956 hwaddr addr,
2957 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002958 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002959{
Avi Kivitya8170e52012-10-23 12:30:10 +02002960 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002961 hwaddr done = 0;
2962 hwaddr l, xlat, base;
2963 MemoryRegion *mr, *this_mr;
2964 ram_addr_t raddr;
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01002965 void *ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00002966
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002967 if (len == 0) {
2968 return NULL;
2969 }
aliguori6d16c2f2009-01-22 16:59:11 +00002970
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002971 l = len;
Paolo Bonzini41063e12015-03-18 14:21:43 +01002972 rcu_read_lock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002973 mr = address_space_translate(as, addr, &xlat, &l, is_write);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002974
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002975 if (!memory_access_is_direct(mr, is_write)) {
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002976 if (atomic_xchg(&bounce.in_use, true)) {
Paolo Bonzini41063e12015-03-18 14:21:43 +01002977 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002978 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002979 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002980 /* Avoid unbounded allocations */
2981 l = MIN(l, TARGET_PAGE_SIZE);
2982 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002983 bounce.addr = addr;
2984 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002985
2986 memory_region_ref(mr);
2987 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002988 if (!is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002989 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2990 bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002991 }
aliguori6d16c2f2009-01-22 16:59:11 +00002992
Paolo Bonzini41063e12015-03-18 14:21:43 +01002993 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002994 *plen = l;
2995 return bounce.buffer;
2996 }
2997
2998 base = xlat;
2999 raddr = memory_region_get_ram_addr(mr);
3000
3001 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00003002 len -= l;
3003 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02003004 done += l;
3005 if (len == 0) {
3006 break;
3007 }
3008
3009 l = len;
3010 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
3011 if (this_mr != mr || xlat != base + done) {
3012 break;
3013 }
aliguori6d16c2f2009-01-22 16:59:11 +00003014 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02003015
Paolo Bonzinid3e71552013-06-28 17:33:29 +02003016 memory_region_ref(mr);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02003017 *plen = done;
Gonglei3655cb92016-02-20 10:35:20 +08003018 ptr = qemu_ram_ptr_length(mr->ram_block, raddr + base, plen);
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01003019 rcu_read_unlock();
3020
3021 return ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00003022}
3023
Avi Kivityac1970f2012-10-03 16:22:53 +02003024/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00003025 * Will also mark the memory as dirty if is_write == 1. access_len gives
3026 * the amount of memory that was actually read or written by the caller.
3027 */
Avi Kivitya8170e52012-10-23 12:30:10 +02003028void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
3029 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00003030{
3031 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02003032 MemoryRegion *mr;
3033 ram_addr_t addr1;
3034
3035 mr = qemu_ram_addr_from_host(buffer, &addr1);
3036 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00003037 if (is_write) {
Paolo Bonzini845b6212015-03-23 11:45:53 +01003038 invalidate_and_set_dirty(mr, addr1, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00003039 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003040 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003041 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003042 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02003043 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00003044 return;
3045 }
3046 if (is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01003047 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
3048 bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00003049 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00003050 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003051 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02003052 memory_region_unref(bounce.mr);
Fam Zhengc2cba0f2015-03-16 17:03:33 +08003053 atomic_mb_set(&bounce.in_use, false);
aliguoriba223c22009-01-22 16:59:16 +00003054 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00003055}
bellardd0ecd2a2006-04-23 17:14:48 +00003056
Avi Kivitya8170e52012-10-23 12:30:10 +02003057void *cpu_physical_memory_map(hwaddr addr,
3058 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02003059 int is_write)
3060{
3061 return address_space_map(&address_space_memory, addr, plen, is_write);
3062}
3063
Avi Kivitya8170e52012-10-23 12:30:10 +02003064void cpu_physical_memory_unmap(void *buffer, hwaddr len,
3065 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02003066{
3067 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
3068}
3069
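/*
 * Sketch, with a hypothetical helper: a zero-copy read through
 * address_space_map(), falling back to address_space_read() for any tail
 * that could not be mapped in one go.
 */
static void example_read_mapped(AddressSpace *as, hwaddr addr,
                                uint8_t *buf, hwaddr len)
{
    hwaddr mapped = len;
    void *p = address_space_map(as, addr, &mapped, false);

    if (p) {
        memcpy(buf, p, mapped);
        /* access_len == mapped: everything copied above was really read. */
        address_space_unmap(as, p, mapped, false, mapped);
    } else {
        mapped = 0;
    }
    if (mapped < len) {
        address_space_read(as, addr + mapped, MEMTXATTRS_UNSPECIFIED,
                           buf + mapped, len - mapped);
    }
}
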
bellard8df1cd02005-01-28 22:37:22 +00003070/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003071static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
3072 MemTxAttrs attrs,
3073 MemTxResult *result,
3074 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003075{
bellard8df1cd02005-01-28 22:37:22 +00003076 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02003077 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003078 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003079 hwaddr l = 4;
3080 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003081 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003082 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003083
Paolo Bonzini41063e12015-03-18 14:21:43 +01003084 rcu_read_lock();
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003085 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003086 if (l < 4 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003087 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003088
bellard8df1cd02005-01-28 22:37:22 +00003089 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003090 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003091#if defined(TARGET_WORDS_BIGENDIAN)
3092 if (endian == DEVICE_LITTLE_ENDIAN) {
3093 val = bswap32(val);
3094 }
3095#else
3096 if (endian == DEVICE_BIG_ENDIAN) {
3097 val = bswap32(val);
3098 }
3099#endif
bellard8df1cd02005-01-28 22:37:22 +00003100 } else {
3101 /* RAM case */
Gonglei3655cb92016-02-20 10:35:20 +08003102 ptr = qemu_get_ram_ptr(mr->ram_block,
3103 (memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003104 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003105 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003106 switch (endian) {
3107 case DEVICE_LITTLE_ENDIAN:
3108 val = ldl_le_p(ptr);
3109 break;
3110 case DEVICE_BIG_ENDIAN:
3111 val = ldl_be_p(ptr);
3112 break;
3113 default:
3114 val = ldl_p(ptr);
3115 break;
3116 }
Peter Maydell50013112015-04-26 16:49:24 +01003117 r = MEMTX_OK;
3118 }
3119 if (result) {
3120 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003121 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003122 if (release_lock) {
3123 qemu_mutex_unlock_iothread();
3124 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003125 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003126 return val;
3127}
3128
Peter Maydell50013112015-04-26 16:49:24 +01003129uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
3130 MemTxAttrs attrs, MemTxResult *result)
3131{
3132 return address_space_ldl_internal(as, addr, attrs, result,
3133 DEVICE_NATIVE_ENDIAN);
3134}
3135
3136uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
3137 MemTxAttrs attrs, MemTxResult *result)
3138{
3139 return address_space_ldl_internal(as, addr, attrs, result,
3140 DEVICE_LITTLE_ENDIAN);
3141}
3142
3143uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
3144 MemTxAttrs attrs, MemTxResult *result)
3145{
3146 return address_space_ldl_internal(as, addr, attrs, result,
3147 DEVICE_BIG_ENDIAN);
3148}
3149
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003150uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003151{
Peter Maydell50013112015-04-26 16:49:24 +01003152 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003153}
3154
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003155uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003156{
Peter Maydell50013112015-04-26 16:49:24 +01003157 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003158}
3159
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003160uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003161{
Peter Maydell50013112015-04-26 16:49:24 +01003162 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003163}
3164
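/*
 * Sketch: reading a 32-bit little-endian device register and checking for a
 * bus error.  The helper and 'reg_addr' are hypothetical; many buses return
 * all-ones on a failed read, which is what is assumed here.
 */
static uint32_t example_read_reg(AddressSpace *as, hwaddr reg_addr)
{
    MemTxResult res;
    uint32_t val = address_space_ldl_le(as, reg_addr,
                                        MEMTXATTRS_UNSPECIFIED, &res);

    return res == MEMTX_OK ? val : UINT32_MAX;
}
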
bellard84b7b8e2005-11-28 21:19:04 +00003165/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003166static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
3167 MemTxAttrs attrs,
3168 MemTxResult *result,
3169 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00003170{
bellard84b7b8e2005-11-28 21:19:04 +00003171 uint8_t *ptr;
3172 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003173 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003174 hwaddr l = 8;
3175 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003176 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003177 bool release_lock = false;
bellard84b7b8e2005-11-28 21:19:04 +00003178
Paolo Bonzini41063e12015-03-18 14:21:43 +01003179 rcu_read_lock();
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003180 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003181 false);
3182 if (l < 8 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003183 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003184
bellard84b7b8e2005-11-28 21:19:04 +00003185 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003186 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
Paolo Bonzini968a5622013-05-24 17:58:37 +02003187#if defined(TARGET_WORDS_BIGENDIAN)
3188 if (endian == DEVICE_LITTLE_ENDIAN) {
3189 val = bswap64(val);
3190 }
3191#else
3192 if (endian == DEVICE_BIG_ENDIAN) {
3193 val = bswap64(val);
3194 }
3195#endif
bellard84b7b8e2005-11-28 21:19:04 +00003196 } else {
3197 /* RAM case */
Gonglei3655cb92016-02-20 10:35:20 +08003198 ptr = qemu_get_ram_ptr(mr->ram_block,
3199 (memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003200 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003201 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003202 switch (endian) {
3203 case DEVICE_LITTLE_ENDIAN:
3204 val = ldq_le_p(ptr);
3205 break;
3206 case DEVICE_BIG_ENDIAN:
3207 val = ldq_be_p(ptr);
3208 break;
3209 default:
3210 val = ldq_p(ptr);
3211 break;
3212 }
Peter Maydell50013112015-04-26 16:49:24 +01003213 r = MEMTX_OK;
3214 }
3215 if (result) {
3216 *result = r;
bellard84b7b8e2005-11-28 21:19:04 +00003217 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003218 if (release_lock) {
3219 qemu_mutex_unlock_iothread();
3220 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003221 rcu_read_unlock();
bellard84b7b8e2005-11-28 21:19:04 +00003222 return val;
3223}
3224
Peter Maydell50013112015-04-26 16:49:24 +01003225uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
3226 MemTxAttrs attrs, MemTxResult *result)
3227{
3228 return address_space_ldq_internal(as, addr, attrs, result,
3229 DEVICE_NATIVE_ENDIAN);
3230}
3231
3232uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
3233 MemTxAttrs attrs, MemTxResult *result)
3234{
3235 return address_space_ldq_internal(as, addr, attrs, result,
3236 DEVICE_LITTLE_ENDIAN);
3237}
3238
3239uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
3240 MemTxAttrs attrs, MemTxResult *result)
3241{
3242 return address_space_ldq_internal(as, addr, attrs, result,
3243 DEVICE_BIG_ENDIAN);
3244}
3245
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003246uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003247{
Peter Maydell50013112015-04-26 16:49:24 +01003248 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003249}
3250
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003251uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003252{
Peter Maydell50013112015-04-26 16:49:24 +01003253 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003254}
3255
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003256uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003257{
Peter Maydell50013112015-04-26 16:49:24 +01003258 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003259}
3260
bellardaab33092005-10-30 20:48:42 +00003261/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003262uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
3263 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003264{
3265 uint8_t val;
Peter Maydell50013112015-04-26 16:49:24 +01003266 MemTxResult r;
3267
3268 r = address_space_rw(as, addr, attrs, &val, 1, 0);
3269 if (result) {
3270 *result = r;
3271 }
bellardaab33092005-10-30 20:48:42 +00003272 return val;
3273}
3274
Peter Maydell50013112015-04-26 16:49:24 +01003275uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
3276{
3277 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3278}
3279
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003280/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003281static inline uint32_t address_space_lduw_internal(AddressSpace *as,
3282 hwaddr addr,
3283 MemTxAttrs attrs,
3284 MemTxResult *result,
3285 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003286{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003287 uint8_t *ptr;
3288 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003289 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003290 hwaddr l = 2;
3291 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003292 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003293 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003294
Paolo Bonzini41063e12015-03-18 14:21:43 +01003295 rcu_read_lock();
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003296 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003297 false);
3298 if (l < 2 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003299 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003300
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003301 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003302 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003303#if defined(TARGET_WORDS_BIGENDIAN)
3304 if (endian == DEVICE_LITTLE_ENDIAN) {
3305 val = bswap16(val);
3306 }
3307#else
3308 if (endian == DEVICE_BIG_ENDIAN) {
3309 val = bswap16(val);
3310 }
3311#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003312 } else {
3313 /* RAM case */
Gonglei3655cb92016-02-20 10:35:20 +08003314 ptr = qemu_get_ram_ptr(mr->ram_block,
3315 (memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003316 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003317 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003318 switch (endian) {
3319 case DEVICE_LITTLE_ENDIAN:
3320 val = lduw_le_p(ptr);
3321 break;
3322 case DEVICE_BIG_ENDIAN:
3323 val = lduw_be_p(ptr);
3324 break;
3325 default:
3326 val = lduw_p(ptr);
3327 break;
3328 }
Peter Maydell50013112015-04-26 16:49:24 +01003329 r = MEMTX_OK;
3330 }
3331 if (result) {
3332 *result = r;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003333 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003334 if (release_lock) {
3335 qemu_mutex_unlock_iothread();
3336 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003337 rcu_read_unlock();
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003338 return val;
bellardaab33092005-10-30 20:48:42 +00003339}
3340
Peter Maydell50013112015-04-26 16:49:24 +01003341uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
3342 MemTxAttrs attrs, MemTxResult *result)
3343{
3344 return address_space_lduw_internal(as, addr, attrs, result,
3345 DEVICE_NATIVE_ENDIAN);
3346}
3347
3348uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
3349 MemTxAttrs attrs, MemTxResult *result)
3350{
3351 return address_space_lduw_internal(as, addr, attrs, result,
3352 DEVICE_LITTLE_ENDIAN);
3353}
3354
3355uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
3356 MemTxAttrs attrs, MemTxResult *result)
3357{
3358 return address_space_lduw_internal(as, addr, attrs, result,
3359 DEVICE_BIG_ENDIAN);
3360}
3361
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003362uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003363{
Peter Maydell50013112015-04-26 16:49:24 +01003364 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003365}
3366
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003367uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003368{
Peter Maydell50013112015-04-26 16:49:24 +01003369 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003370}
3371
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003372uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003373{
Peter Maydell50013112015-04-26 16:49:24 +01003374 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003375}
3376
bellard8df1cd02005-01-28 22:37:22 +00003377/* warning: addr must be aligned. The RAM page is not marked as dirty
 3378 and the code inside it is not invalidated. This is useful when the dirty
 3379 bits are used to track modified PTEs. */
Peter Maydell50013112015-04-26 16:49:24 +01003380void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3381 MemTxAttrs attrs, MemTxResult *result)
bellard8df1cd02005-01-28 22:37:22 +00003382{
bellard8df1cd02005-01-28 22:37:22 +00003383 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003384 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003385 hwaddr l = 4;
3386 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003387 MemTxResult r;
Paolo Bonzini845b6212015-03-23 11:45:53 +01003388 uint8_t dirty_log_mask;
Jan Kiszka4840f102015-06-18 18:47:22 +02003389 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003390
Paolo Bonzini41063e12015-03-18 14:21:43 +01003391 rcu_read_lock();
Edgar E. Iglesias2198a122013-11-28 10:13:41 +01003392 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003393 true);
3394 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003395 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003396
Peter Maydell50013112015-04-26 16:49:24 +01003397 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003398 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003399 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
Gonglei3655cb92016-02-20 10:35:20 +08003400 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
bellard8df1cd02005-01-28 22:37:22 +00003401 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003402
Paolo Bonzini845b6212015-03-23 11:45:53 +01003403 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3404 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
Paolo Bonzini58d27072015-03-23 11:56:01 +01003405 cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
Peter Maydell50013112015-04-26 16:49:24 +01003406 r = MEMTX_OK;
3407 }
3408 if (result) {
3409 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003410 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003411 if (release_lock) {
3412 qemu_mutex_unlock_iothread();
3413 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003414 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003415}
3416
Peter Maydell50013112015-04-26 16:49:24 +01003417void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3418{
3419 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3420}
3421
bellard8df1cd02005-01-28 22:37:22 +00003422/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003423static inline void address_space_stl_internal(AddressSpace *as,
3424 hwaddr addr, uint32_t val,
3425 MemTxAttrs attrs,
3426 MemTxResult *result,
3427 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003428{
bellard8df1cd02005-01-28 22:37:22 +00003429 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003430 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003431 hwaddr l = 4;
3432 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003433 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003434 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003435
Paolo Bonzini41063e12015-03-18 14:21:43 +01003436 rcu_read_lock();
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003437 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003438 true);
3439 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003440 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003441
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003442#if defined(TARGET_WORDS_BIGENDIAN)
3443 if (endian == DEVICE_LITTLE_ENDIAN) {
3444 val = bswap32(val);
3445 }
3446#else
3447 if (endian == DEVICE_BIG_ENDIAN) {
3448 val = bswap32(val);
3449 }
3450#endif
Peter Maydell50013112015-04-26 16:49:24 +01003451 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003452 } else {
bellard8df1cd02005-01-28 22:37:22 +00003453 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003454 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
Gonglei3655cb92016-02-20 10:35:20 +08003455 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003456 switch (endian) {
3457 case DEVICE_LITTLE_ENDIAN:
3458 stl_le_p(ptr, val);
3459 break;
3460 case DEVICE_BIG_ENDIAN:
3461 stl_be_p(ptr, val);
3462 break;
3463 default:
3464 stl_p(ptr, val);
3465 break;
3466 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003467 invalidate_and_set_dirty(mr, addr1, 4);
Peter Maydell50013112015-04-26 16:49:24 +01003468 r = MEMTX_OK;
bellard8df1cd02005-01-28 22:37:22 +00003469 }
Peter Maydell50013112015-04-26 16:49:24 +01003470 if (result) {
3471 *result = r;
3472 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003473 if (release_lock) {
3474 qemu_mutex_unlock_iothread();
3475 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003476 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003477}
3478
3479void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3480 MemTxAttrs attrs, MemTxResult *result)
3481{
3482 address_space_stl_internal(as, addr, val, attrs, result,
3483 DEVICE_NATIVE_ENDIAN);
3484}
3485
3486void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3487 MemTxAttrs attrs, MemTxResult *result)
3488{
3489 address_space_stl_internal(as, addr, val, attrs, result,
3490 DEVICE_LITTLE_ENDIAN);
3491}
3492
3493void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3494 MemTxAttrs attrs, MemTxResult *result)
3495{
3496 address_space_stl_internal(as, addr, val, attrs, result,
3497 DEVICE_BIG_ENDIAN);
bellard8df1cd02005-01-28 22:37:22 +00003498}
3499
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003500void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003501{
Peter Maydell50013112015-04-26 16:49:24 +01003502 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003503}
3504
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003505void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003506{
Peter Maydell50013112015-04-26 16:49:24 +01003507 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003508}
3509
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003510void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003511{
Peter Maydell50013112015-04-26 16:49:24 +01003512 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003513}
3514
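/*
 * Sketch, mirroring the load example above: writing a 32-bit big-endian
 * register and reporting failure to the caller.  The helper and 'reg_addr'
 * are hypothetical.
 */
static bool example_write_reg(AddressSpace *as, hwaddr reg_addr, uint32_t val)
{
    MemTxResult res;

    address_space_stl_be(as, reg_addr, val, MEMTXATTRS_UNSPECIFIED, &res);
    return res == MEMTX_OK;
}
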
bellardaab33092005-10-30 20:48:42 +00003515/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003516void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3517 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003518{
3519 uint8_t v = val;
Peter Maydell50013112015-04-26 16:49:24 +01003520 MemTxResult r;
3521
3522 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3523 if (result) {
3524 *result = r;
3525 }
3526}
3527
3528void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3529{
3530 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003531}
3532
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003533/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003534static inline void address_space_stw_internal(AddressSpace *as,
3535 hwaddr addr, uint32_t val,
3536 MemTxAttrs attrs,
3537 MemTxResult *result,
3538 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003539{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003540 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003541 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003542 hwaddr l = 2;
3543 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003544 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003545 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003546
Paolo Bonzini41063e12015-03-18 14:21:43 +01003547 rcu_read_lock();
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003548 mr = address_space_translate(as, addr, &addr1, &l, true);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003549 if (l < 2 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003550 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003551
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003552#if defined(TARGET_WORDS_BIGENDIAN)
3553 if (endian == DEVICE_LITTLE_ENDIAN) {
3554 val = bswap16(val);
3555 }
3556#else
3557 if (endian == DEVICE_BIG_ENDIAN) {
3558 val = bswap16(val);
3559 }
3560#endif
Peter Maydell50013112015-04-26 16:49:24 +01003561 r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003562 } else {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003563 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003564 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
Gonglei3655cb92016-02-20 10:35:20 +08003565 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003566 switch (endian) {
3567 case DEVICE_LITTLE_ENDIAN:
3568 stw_le_p(ptr, val);
3569 break;
3570 case DEVICE_BIG_ENDIAN:
3571 stw_be_p(ptr, val);
3572 break;
3573 default:
3574 stw_p(ptr, val);
3575 break;
3576 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003577 invalidate_and_set_dirty(mr, addr1, 2);
Peter Maydell50013112015-04-26 16:49:24 +01003578 r = MEMTX_OK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003579 }
Peter Maydell50013112015-04-26 16:49:24 +01003580 if (result) {
3581 *result = r;
3582 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003583 if (release_lock) {
3584 qemu_mutex_unlock_iothread();
3585 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003586 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003587}
3588
3589void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
3590 MemTxAttrs attrs, MemTxResult *result)
3591{
3592 address_space_stw_internal(as, addr, val, attrs, result,
3593 DEVICE_NATIVE_ENDIAN);
3594}
3595
3596void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
3597 MemTxAttrs attrs, MemTxResult *result)
3598{
3599 address_space_stw_internal(as, addr, val, attrs, result,
3600 DEVICE_LITTLE_ENDIAN);
3601}
3602
3603void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
3604 MemTxAttrs attrs, MemTxResult *result)
3605{
3606 address_space_stw_internal(as, addr, val, attrs, result,
3607 DEVICE_BIG_ENDIAN);
bellardaab33092005-10-30 20:48:42 +00003608}
3609
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003610void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003611{
Peter Maydell50013112015-04-26 16:49:24 +01003612 address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003613}
3614
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003615void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003616{
Peter Maydell50013112015-04-26 16:49:24 +01003617 address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003618}
3619
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003620void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003621{
Peter Maydell50013112015-04-26 16:49:24 +01003622 address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003623}
3624
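/*
 * Illustrative sketch only: example_stw_le_aligned() is a hypothetical
 * wrapper, not a QEMU API.  It makes the "addr must be aligned" warning
 * above explicit before delegating to address_space_stw_le().
 */
static inline void example_stw_le_aligned(AddressSpace *as, hwaddr addr,
                                          uint16_t val)
{
    assert((addr & 1) == 0);   /* halfword stores assume 2-byte alignment */
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
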
bellardaab33092005-10-30 20:48:42 +00003625/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003626void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
3627 MemTxAttrs attrs, MemTxResult *result)
3628{
3629 MemTxResult r;
3630 val = tswap64(val);
3631 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3632 if (result) {
3633 *result = r;
3634 }
3635}
3636
3637void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
3638 MemTxAttrs attrs, MemTxResult *result)
3639{
3640 MemTxResult r;
3641 val = cpu_to_le64(val);
3642 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3643 if (result) {
3644 *result = r;
3645 }
3646}

3647void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
3648 MemTxAttrs attrs, MemTxResult *result)
3649{
3650 MemTxResult r;
3651 val = cpu_to_be64(val);
3652 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3653 if (result) {
3654 *result = r;
3655 }
3656}
3657
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003658void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00003659{
Peter Maydell50013112015-04-26 16:49:24 +01003660 address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003661}
3662
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003663void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003664{
Peter Maydell50013112015-04-26 16:49:24 +01003665 address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003666}
3667
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003668void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003669{
Peter Maydell50013112015-04-26 16:49:24 +01003670 address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003671}
3672
aliguori5e2972f2009-03-28 17:51:36 +00003673/* virtual memory access for debug (includes writing to ROM) */
Andreas Färberf17ec442013-06-29 19:40:58 +02003674int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00003675 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003676{
3677 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02003678 hwaddr phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00003679 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00003680
3681 while (len > 0) {
Peter Maydell5232e4c2016-01-21 14:15:06 +00003682 int asidx;
3683 MemTxAttrs attrs;
3684
bellard13eb76e2004-01-24 15:23:36 +00003685 page = addr & TARGET_PAGE_MASK;
Peter Maydell5232e4c2016-01-21 14:15:06 +00003686 phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
3687 asidx = cpu_asidx_from_attrs(cpu, attrs);
bellard13eb76e2004-01-24 15:23:36 +00003688 /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
3691 l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
aliguori5e2972f2009-03-28 17:51:36 +00003694 phys_addr += (addr & ~TARGET_PAGE_MASK);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003695 if (is_write) {
Peter Maydell5232e4c2016-01-21 14:15:06 +00003696 cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
3697 phys_addr, buf, l);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003698 } else {
Peter Maydell5232e4c2016-01-21 14:15:06 +00003699 address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
3700 MEMTXATTRS_UNSPECIFIED,
Peter Maydell5c9eb022015-04-26 16:49:24 +01003701 buf, l, 0);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003702 }
bellard13eb76e2004-01-24 15:23:36 +00003703 len -= l;
3704 buf += l;
3705 addr += l;
3706 }
3707 return 0;
3708}
Dr. David Alan Gilbert038629a2015-11-05 18:10:29 +00003709
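/*
 * Illustrative sketch only: example_read_guest_u32() is a hypothetical
 * helper, not a QEMU API.  A gdbstub- or monitor-style caller can use
 * cpu_memory_rw_debug() above to read a guest-virtual address through the
 * CPU's MMU; the bytes land in *out exactly as they sit in guest memory,
 * i.e. still in guest byte order.
 */
static inline bool example_read_guest_u32(CPUState *cpu, target_ulong vaddr,
                                          uint32_t *out)
{
    return cpu_memory_rw_debug(cpu, vaddr, (uint8_t *)out,
                               sizeof(*out), 0) == 0;
}
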
3710/*
3711 * Allows code that needs to deal with migration bitmaps etc. to still be
3712 * built in a target-independent way.
3713 */
3714size_t qemu_target_page_bits(void)
3715{
3716 return TARGET_PAGE_BITS;
3717}
3718
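/*
 * Illustrative sketch only: example_bitmap_pages() is a hypothetical
 * helper showing the intended use of qemu_target_page_bits(), namely
 * sizing a per-page structure (e.g. a migration dirty bitmap) from a
 * byte length without referencing TARGET_PAGE_SIZE directly.
 */
static inline uint64_t example_bitmap_pages(uint64_t bytes)
{
    size_t page_bits = qemu_target_page_bits();

    /* round up to a whole number of target pages */
    return (bytes + (1ULL << page_bits) - 1) >> page_bits;
}
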
Paul Brooka68fe892010-03-01 00:08:59 +00003719#endif
bellard13eb76e2004-01-24 15:23:36 +00003720
Blue Swirl8e4a4242013-01-06 18:30:17 +00003721/*
3722 * A helper function for the _utterly broken_ virtio device model to find out if
3723 * it's running on a big endian machine. Don't do this at home kids!
3724 */
Greg Kurz98ed8ec2014-06-24 19:26:29 +02003725bool target_words_bigendian(void);
3726bool target_words_bigendian(void)
Blue Swirl8e4a4242013-01-06 18:30:17 +00003727{
3728#if defined(TARGET_WORDS_BIGENDIAN)
3729 return true;
3730#else
3731 return false;
3732#endif
3733}
3734
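/*
 * Illustrative sketch only: example_to_guest16() is a hypothetical
 * helper, not a QEMU API.  It shows the kind of run-time branch the
 * comment above refers to: converting a host-order value into whichever
 * byte order the guest expects.
 */
static inline uint16_t example_to_guest16(uint16_t host_val)
{
    /* cpu_to_be16()/cpu_to_le16() deal with the host side; this only
     * selects the guest byte order. */
    return target_words_bigendian() ? cpu_to_be16(host_val)
                                    : cpu_to_le16(host_val);
}
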
Wen Congyang76f35532012-05-07 12:04:18 +08003735#ifndef CONFIG_USER_ONLY
Avi Kivitya8170e52012-10-23 12:30:10 +02003736bool cpu_physical_memory_is_io(hwaddr phys_addr)
Wen Congyang76f35532012-05-07 12:04:18 +08003737{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003738    MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003739 hwaddr l = 1;
Paolo Bonzini41063e12015-03-18 14:21:43 +01003740 bool res;
Wen Congyang76f35532012-05-07 12:04:18 +08003741
Paolo Bonzini41063e12015-03-18 14:21:43 +01003742 rcu_read_lock();
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003743 mr = address_space_translate(&address_space_memory,
3744 phys_addr, &phys_addr, &l, false);
Wen Congyang76f35532012-05-07 12:04:18 +08003745
Paolo Bonzini41063e12015-03-18 14:21:43 +01003746 res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
3747 rcu_read_unlock();
3748 return res;
Wen Congyang76f35532012-05-07 12:04:18 +08003749}
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003750
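/*
 * Illustrative sketch only: example_range_is_ram() is a hypothetical
 * helper, not a QEMU API.  A memory-dump style caller could walk a guest
 * physical range page by page and skip it entirely if any page is backed
 * by MMIO, avoiding unwanted device side effects.
 */
static inline bool example_range_is_ram(hwaddr start, hwaddr len)
{
    hwaddr a;

    for (a = start; a < start + len; a += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_is_io(a)) {
            return false;
        }
    }
    return true;
}
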
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003751int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003752{
3753 RAMBlock *block;
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003754 int ret = 0;
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003755
Mike Day0dc3f442013-09-05 14:41:35 -04003756 rcu_read_lock();
3757 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003758 ret = func(block->idstr, block->host, block->offset,
3759 block->used_length, opaque);
3760 if (ret) {
3761 break;
3762 }
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003763 }
Mike Day0dc3f442013-09-05 14:41:35 -04003764 rcu_read_unlock();
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003765 return ret;
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003766}
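
/*
 * Illustrative sketch only: the example_* names are hypothetical, not
 * QEMU APIs.  A RAMBlockIterFunc callback receives each block's name,
 * host pointer, offset and used length, mirroring the call above; this
 * one simply totals the used length of all RAM blocks.
 */
static int example_sum_block(const char *block_name, void *host_addr,
                             ram_addr_t offset, ram_addr_t length,
                             void *opaque)
{
    uint64_t *total = opaque;

    *total += length;
    return 0;   /* nonzero would stop the iteration early */
}

static inline uint64_t example_total_ram_bytes(void)
{
    uint64_t total = 0;

    qemu_ram_foreach_block(example_sum_block, &total);
    return total;
}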
Peter Maydellec3f8c92013-06-27 20:53:38 +01003767#endif