/*
 *  Virtual page mapping
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#ifndef _WIN32
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "hw/qdev.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "exec/log.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits to skip to the next level (in units of L2_SIZE).
     * 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables. */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
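/* A worked example, assuming a target with 4 KiB pages (TARGET_PAGE_BITS == 12):
 * P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = 6, i.e. six radix-tree levels of
 * P_L2_BITS bits each resolve the 52-bit physical page number.
 */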

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

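/* Well-known indices into PhysPageMap.sections: index 0 backs any address
 * with no mapping, and the other three are folded into iotlb values by
 * memory_region_section_get_iotlb() below so that dirty tracking, ROM
 * protection and watchpoints can hook the access path.
 */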
#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

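/* Fill [*index, *index + *nb) of the tree rooted at *lp with @leaf.
 * At each level, a range that is aligned on and spans a whole step of
 * (1 << (level * P_L2_BITS)) pages is stored directly in that level's
 * entry; anything smaller recurses one level down.
 */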
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non-leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return section->size.hi ||
           range_covers_byte(section->offset_within_address_space,
                             section->size.lo, addr);
}

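/* Walk the radix tree for @addr.  Each iteration consumes lp.skip levels
 * (a compacted chain counts as a single hop) and picks the next node with
 * the matching P_L2_BITS-wide slice of the page number; a NIL pointer, or
 * a leaf that does not cover @addr, resolves to the unassigned section.
 */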
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

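/* Fast path for repeated lookups: the most recently used section is
 * cached in d->mru_section and reused while it still covers @addr;
 * otherwise a full phys_page_find() walk runs and refreshes the cache.
 */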
/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = atomic_read(&d->mru_section);
    subpage_t *subpage;
    bool update;

    if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
        section_covers_addr(section, addr)) {
        update = false;
    } else {
        section = phys_page_find(d->phys_map, addr, d->map.nodes,
                                 d->map.sections);
        update = true;
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    if (update) {
        atomic_set(&d->mru_section, section);
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

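/* Resolve @addr in @as to a terminal MemoryRegion, iterating through any
 * intervening IOMMUs.  Each IOMMU hop rewrites the address and clamps
 * *plen to the translated page, and a permission failure resolves to
 * io_mem_unassigned.  Under Xen the result is additionally clamped to one
 * page, presumably so direct RAM accesses stay within a single mapcache
 * mapping.
 */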
/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
#endif

#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

static int cpu_get_free_index(Error **errp)
{
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
                   MAX_CPUMASK_BITS);
        return -1;
    }

    bitmap_set(cpu_index_map, cpu, 1);
    return cpu;
}

void cpu_exec_exit(CPUState *cpu)
{
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
        return;
    }

    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
    cpu->cpu_index = -1;
}
#else

static int cpu_get_free_index(Error **errp)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

void cpu_exec_exit(CPUState *cpu)
{
}
#endif

void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int cpu_index;
    Error *local_err = NULL;

    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();

    /* This is a softmmu CPU object, so create a property for it
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
                             (Object **)&cpu->memory,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));
#endif

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

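/* Throw away any translated code that contains the breakpoint address.
 * User-mode emulation can invalidate by virtual pc directly; softmmu
 * must first translate pc to a physical address in the CPU's current
 * address space.
 */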
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    MemTxAttrs attrs;
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
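/* Map a guest ram_addr_t back to its RAMBlock: check the MRU pointer
 * first, then scan the RCU-protected block list.
 */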
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *                  rcu_read_lock()
     *                  read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *                  rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

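/* Once dirty bits have been cleared, cached TLB entries may no longer
 * write straight to RAM: convert the guest range to its host address
 * and make every CPU's TLB entries for it go back through the notdirty
 * path on the next write.
 */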
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}

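/* Test-and-clear the dirty bitmap of one client (VGA, code, migration)
 * over a guest range.  The bitmap is sharded into chunks of
 * DIRTY_MEMORY_BLOCK_SIZE pages, read under RCU and cleared with atomic
 * bitmap operations.
 */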
/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    bool dirty = false;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
                                              offset, num);
        page += num;
    }

    rcu_read_unlock();

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}

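/* Build the iotlb value for a softmmu TLB fill.  For RAM it is the
 * ram_addr of the page, ORed with PHYS_SECTION_NOTDIRTY or
 * PHYS_SECTION_ROM so that writes take the slow path; for MMIO it is
 * the section index plus the offset within the section.  Pages covered
 * by a watchpoint are forced through PHYS_SECTION_WATCH with TLB_MMIO
 * set.
 */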
/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

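/* Register a section that does not cover a whole target page.  The page
 * gets (or reuses) a subpage_t container, and the section is recorded in
 * the per-page sub_section[] table for its [start, end] byte range.
 */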
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}

static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

Avi Kivityac1970f2012-10-03 16:22:53 +02001181static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001182{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001183 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini00752702013-05-29 12:13:54 +02001184 AddressSpaceDispatch *d = as->next_dispatch;
Paolo Bonzini99b9cc02013-05-27 13:18:01 +02001185 MemoryRegionSection now = *section, remain = *section;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001186 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001187
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001188 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1189 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1190 - now.offset_within_address_space;
1191
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001192 now.size = int128_min(int128_make64(left), now.size);
Avi Kivityac1970f2012-10-03 16:22:53 +02001193 register_subpage(d, &now);
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001194 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001195 now.size = int128_zero();
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001196 }
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001197 while (int128_ne(remain.size, now.size)) {
1198 remain.size = int128_sub(remain.size, now.size);
1199 remain.offset_within_address_space += int128_get64(now.size);
1200 remain.offset_within_region += int128_get64(now.size);
Tyler Hall69b67642012-07-25 18:45:04 -04001201 now = remain;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001202 if (int128_lt(remain.size, page_size)) {
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001203 register_subpage(d, &now);
Hu Tao88266242013-08-29 18:21:16 +08001204 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001205 now.size = page_size;
Avi Kivityac1970f2012-10-03 16:22:53 +02001206 register_subpage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001207 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001208 now.size = int128_and(now.size, int128_neg(page_size));
Avi Kivityac1970f2012-10-03 16:22:53 +02001209 register_multipage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001210 }
Avi Kivity0f0cb162012-02-13 17:14:32 +02001211 }
1212}
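
/*
 * Worked example (made-up numbers, assuming 0x1000-byte target pages):
 * a section covering [0x0800, 0x3400) is registered in three steps by
 * the loop above: a subpage for the unaligned head [0x0800, 0x1000),
 * a multipage for the aligned middle [0x1000, 0x3000), and a subpage
 * for the unaligned tail [0x3000, 0x3400).
 */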

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path,
                            Error **errp)
{
    bool unlink_on_error = false;
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd;
    int64_t page_size;

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        error_setg(errp,
                   "host lacks kvm mmu notifiers, -mem-path unsupported");
        return NULL;
    }

    for (;;) {
        fd = open(path, O_RDWR);
        if (fd >= 0) {
            /* @path names an existing file, use it */
            break;
        }
        if (errno == ENOENT) {
            /* @path names a file that doesn't exist, create it */
            fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
            if (fd >= 0) {
                unlink_on_error = true;
                break;
            }
        } else if (errno == EISDIR) {
            /* @path names a directory, create a file there */
            /* Make name safe to use with mkstemp by replacing '/' with '_'. */
            sanitized_name = g_strdup(memory_region_name(block->mr));
            for (c = sanitized_name; *c != '\0'; c++) {
                if (*c == '/') {
                    *c = '_';
                }
            }

            filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                                       sanitized_name);
            g_free(sanitized_name);

            fd = mkstemp(filename);
            if (fd >= 0) {
                unlink(filename);
                g_free(filename);
                break;
            }
            g_free(filename);
        }
        if (errno != EEXIST && errno != EINTR) {
            error_setg_errno(errp, errno,
                             "can't open backing store %s for guest RAM",
                             path);
            goto error;
        }
        /*
         * Try again on EINTR and EEXIST.  The latter happens when
         * something else creates the file between our two open().
         */
    }

    page_size = qemu_fd_getpagesize(fd);
    block->mr->align = page_size;

    if (memory < page_size) {
        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
                   "or larger than page size 0x%" PRIx64,
                   memory, page_size);
        goto error;
    }

    memory = ROUND_UP(memory, page_size);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory)) {
        perror("ftruncate");
    }
    area = qemu_ram_mmap(fd, memory, page_size, block->flags & RAM_SHARED);
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for guest RAM");
        goto error;    /* the error path below closes fd */
    }

    if (mem_prealloc) {
        os_mem_prealloc(fd, area, memory);
    }

    block->fd = fd;
    return area;

error:
    if (unlink_on_error) {
        unlink(path);
    }
    if (fd != -1) {
        close(fd);
    }
    return NULL;
}
#endif
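
/*
 * Orientation sketch (assumed invocation, not taken from this file):
 * a command line such as
 *
 *     qemu-system-x86_64 -m 4G -mem-path /dev/hugepages ...
 *
 * reaches file_ram_alloc() via qemu_ram_alloc_from_file() below; since
 * @path is a directory there, the EISDIR branch above creates an
 * unlinked qemu_back_mem.<region>.XXXXXX file in it and mmaps guest
 * RAM from that file at its page (hugepage) granularity.
 */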

/* Called with the ramlist lock held.  */
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
        return 0;
    }

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->max_length;

        QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
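
/*
 * Example (invented offsets): with existing blocks at [0x0, 0x40000000)
 * and [0x80000000, 0xc0000000), a request for 0x20000000 bytes returns
 * 0x40000000; the smallest gap that still fits wins, which keeps the
 * ram_addr_t space compact without ever moving existing blocks.
 */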

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last = MAX(last, block->offset + block->max_length);
    }
    rcu_read_unlock();
    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!machine_dump_guest_core(current_machine)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
        }
    }
}

/* Called within an RCU critical section, or while the ramlist lock
 * is held.
 */
static RAMBlock *find_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            return block;
        }
    }

    return NULL;
}

const char *qemu_ram_get_idstr(RAMBlock *rb)
{
    return rb->idstr;
}

/* Called with iothread lock held.  */
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    rcu_read_lock();
    new_block = find_ram_block(addr);
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    rcu_read_unlock();
}

/* Called with iothread lock held.  */
void qemu_ram_unset_idstr(ram_addr_t addr)
{
    RAMBlock *block;

    /* FIXME: arch_init.c assumes that this is not called throughout
     * migration.  Ignore the problem since hot-unplug during migration
     * does not work anyway.
     */

    rcu_read_lock();
    block = find_ram_block(addr);
    if (block) {
        memset(block->idstr, 0, sizeof(block->idstr));
    }
    rcu_read_unlock();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!machine_mem_merge(current_machine)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}
/* Only legal before the guest might have detected the memory size: e.g. on
 * incoming migration, or right after reset.
 *
 * As the memory core doesn't know how the memory is accessed, it is up to
 * the resize callback to update device state and/or add assertions to detect
 * misuse, if necessary.
 */
int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
{
    RAMBlock *block = find_ram_block(base);

    assert(block);

    newsize = HOST_PAGE_ALIGN(newsize);

    if (block->used_length == newsize) {
        return 0;
    }

    if (!(block->flags & RAM_RESIZEABLE)) {
        error_setg_errno(errp, EINVAL,
                         "Length mismatch: %s: 0x" RAM_ADDR_FMT
                         " in != 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->used_length);
        return -EINVAL;
    }

    if (block->max_length < newsize) {
        error_setg_errno(errp, EINVAL,
                         "Length too large: %s: 0x" RAM_ADDR_FMT
                         " > 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->max_length);
        return -EINVAL;
    }

    cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
    block->used_length = newsize;
    cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
                                        DIRTY_CLIENTS_ALL);
    memory_region_set_size(block->mr, newsize);
    if (block->resized) {
        block->resized(block->idstr, newsize, block->host);
    }
    return 0;
}
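
/*
 * Caller sketch (hypothetical device; names invented for illustration):
 * a device whose RAM size is only known at migration time might do
 *
 *     Error *local_err = NULL;
 *     if (qemu_ram_resize(block_base, incoming_size, &local_err) < 0) {
 *         error_propagate(errp, local_err);
 *         return;
 *     }
 *
 * which only succeeds for blocks created with RAM_RESIZEABLE set and
 * incoming_size <= max_length.
 */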

/* Called with ram_list.mutex held */
static void dirty_memory_extend(ram_addr_t old_ram_size,
                                ram_addr_t new_ram_size)
{
    ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
                                             DIRTY_MEMORY_BLOCK_SIZE);
    ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
                                             DIRTY_MEMORY_BLOCK_SIZE);
    int i;

    /* Only need to extend if block count increased */
    if (new_num_blocks <= old_num_blocks) {
        return;
    }

    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
        DirtyMemoryBlocks *old_blocks;
        DirtyMemoryBlocks *new_blocks;
        int j;

        old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
        new_blocks = g_malloc(sizeof(*new_blocks) +
                              sizeof(new_blocks->blocks[0]) * new_num_blocks);

        if (old_num_blocks) {
            memcpy(new_blocks->blocks, old_blocks->blocks,
                   old_num_blocks * sizeof(old_blocks->blocks[0]));
        }

        for (j = old_num_blocks; j < new_num_blocks; j++) {
            new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
        }

        atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);

        if (old_blocks) {
            g_free_rcu(old_blocks, rcu);
        }
    }
}
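
/*
 * Reader-side sketch (the assumed access pattern; cf. the dirty-bitmap
 * helpers in exec/ram_addr.h) showing why the writer above must publish
 * a whole new array instead of resizing in place:
 *
 *     unsigned long page = addr >> TARGET_PAGE_BITS;
 *     rcu_read_lock();
 *     DirtyMemoryBlocks *blocks =
 *         atomic_rcu_read(&ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION]);
 *     bool dirty = test_bit(page % DIRTY_MEMORY_BLOCK_SIZE,
 *                           blocks->blocks[page / DIRTY_MEMORY_BLOCK_SIZE]);
 *     rcu_read_unlock();
 *
 * Readers may still hold the old array across the swap; g_free_rcu()
 * delays freeing it until they are done.
 */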

static void ram_block_add(RAMBlock *new_block, Error **errp)
{
    RAMBlock *block;
    RAMBlock *last_block = NULL;
    ram_addr_t old_ram_size, new_ram_size;
    Error *err = NULL;

    old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(new_block->max_length);

    if (!new_block->host) {
        if (xen_enabled()) {
            xen_ram_alloc(new_block->offset, new_block->max_length,
                          new_block->mr, &err);
            if (err) {
                error_propagate(errp, err);
                qemu_mutex_unlock_ramlist();
                return;
            }
        } else {
            new_block->host = phys_mem_alloc(new_block->max_length,
                                             &new_block->mr->align);
            if (!new_block->host) {
                error_setg_errno(errp, errno,
                                 "cannot set up guest memory '%s'",
                                 memory_region_name(new_block->mr));
                qemu_mutex_unlock_ramlist();
                return;
            }
            memory_try_enable_merging(new_block->host, new_block->max_length);
        }
    }

    new_ram_size = MAX(old_ram_size,
                       (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
    if (new_ram_size > old_ram_size) {
        migration_bitmap_extend(old_ram_size, new_ram_size);
        dirty_memory_extend(old_ram_size, new_ram_size);
    }
    /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
     * QLIST (which has an RCU-friendly variant) does not have insertion at
     * tail, so save the last element in last_block.
     */
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last_block = block;
        if (block->max_length < new_block->max_length) {
            break;
        }
    }
    if (block) {
        QLIST_INSERT_BEFORE_RCU(block, new_block, next);
    } else if (last_block) {
        QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
    } else { /* list is empty */
        QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    cpu_physical_memory_set_dirty_range(new_block->offset,
                                        new_block->used_length,
                                        DIRTY_CLIENTS_ALL);

    if (new_block->host) {
        qemu_ram_setup_dump(new_block->host, new_block->max_length);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
        if (kvm_enabled()) {
            kvm_setup_guest_memory(new_block->host, new_block->max_length);
        }
    }
}

#ifdef __linux__
RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                   bool share, const char *mem_path,
                                   Error **errp)
{
    RAMBlock *new_block;
    Error *local_err = NULL;

    if (xen_enabled()) {
        error_setg(errp, "-mem-path not supported with Xen");
        return NULL;
    }

    if (phys_mem_alloc != qemu_anon_ram_alloc) {
        /*
         * file_ram_alloc() needs to allocate just like
         * phys_mem_alloc, but we haven't bothered to provide
         * a hook there.
         */
        error_setg(errp,
                   "-mem-path not supported with this accelerator");
        return NULL;
    }

    size = HOST_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->used_length = size;
    new_block->max_length = size;
    new_block->flags = share ? RAM_SHARED : 0;
    new_block->host = file_ram_alloc(new_block, size,
                                     mem_path, errp);
    if (!new_block->host) {
        g_free(new_block);
        return NULL;
    }

    ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return NULL;
    }
    return new_block;
}
#endif
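
/*
 * Usage sketch (assumed caller in the memory region layer): a shared
 * hugepage-backed region is typically created as
 *
 *     qemu_ram_alloc_from_file(size, mr, true, "/dev/hugepages", &err);
 *
 * share=true makes file_ram_alloc() mmap the file with MAP_SHARED, so
 * an external process mapping the same file (e.g. a vhost-user backend)
 * observes guest RAM directly.
 */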

static
RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
                                  void (*resized)(const char*,
                                                  uint64_t length,
                                                  void *host),
                                  void *host, bool resizeable,
                                  MemoryRegion *mr, Error **errp)
{
    RAMBlock *new_block;
    Error *local_err = NULL;

    size = HOST_PAGE_ALIGN(size);
    max_size = HOST_PAGE_ALIGN(max_size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->resized = resized;
    new_block->used_length = size;
    new_block->max_length = max_size;
    assert(max_size >= size);
    new_block->fd = -1;
    new_block->host = host;
    if (host) {
        new_block->flags |= RAM_PREALLOC;
    }
    if (resizeable) {
        new_block->flags |= RAM_RESIZEABLE;
    }
    ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return NULL;
    }
    return new_block;
}

RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                  MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
}

RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
}

RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
                                    void (*resized)(const char*,
                                                    uint64_t length,
                                                    void *host),
                                    MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
}
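
/*
 * Sketch of a resized() callback (hypothetical device, invented names):
 *
 *     static void fw_blob_resized(const char *id, uint64_t length, void *host)
 *     {
 *         // the block grew in place: refresh any cached size or bounds
 *     }
 *
 *     qemu_ram_alloc_resizeable(initial_size, max_size,
 *                               fw_blob_resized, mr, &err);
 *
 * The callback runs from qemu_ram_resize() after used_length has been
 * updated, so it only needs to react, not validate.
 */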

static void reclaim_ramblock(RAMBlock *block)
{
    if (block->flags & RAM_PREALLOC) {
        ;
    } else if (xen_enabled()) {
        xen_invalidate_map_cache_entry(block->host);
#ifndef _WIN32
    } else if (block->fd >= 0) {
        qemu_ram_munmap(block->host, block->max_length);
        close(block->fd);
#endif
    } else {
        qemu_anon_ram_free(block->host, block->max_length);
    }
    g_free(block);
}

void qemu_ram_free(RAMBlock *block)
{
    qemu_mutex_lock_ramlist();
    QLIST_REMOVE_RCU(block, next);
    ram_list.mru_block = NULL;
    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    call_rcu(block, reclaim_ramblock, rcu);
    qemu_mutex_unlock_ramlist();
}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->max_length) {
            vaddr = ramblock_ptr(block, offset);
            if (block->flags & RAM_PREALLOC) {
                ;
            } else if (xen_enabled()) {
                abort();
            } else {
                flags = MAP_FIXED;
                if (block->fd >= 0) {
                    flags |= (block->flags & RAM_SHARED ?
                              MAP_SHARED : MAP_PRIVATE);
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                } else {
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
        }
    }
}
#endif /* !_WIN32 */

int qemu_get_ram_fd(ram_addr_t addr)
{
    RAMBlock *block;
    int fd;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);
    fd = block->fd;
    rcu_read_unlock();
    return fd;
}

void qemu_set_ram_fd(ram_addr_t addr, int fd)
{
    RAMBlock *block;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);
    block->fd = fd;
    rcu_read_unlock();
}

void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
{
    RAMBlock *block;
    void *ptr;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);
    ptr = ramblock_ptr(block, 0);
    rcu_read_unlock();
    return ptr;
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * This should not be used for general purpose DMA.  Use address_space_map
 * or address_space_rw instead.  For local memory (e.g. video ram) that the
 * device owns, use memory_region_get_ram_ptr.
 *
 * Called within RCU critical section.
 */
void *qemu_get_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
{
    RAMBlock *block = ram_block;

    if (block == NULL) {
        block = qemu_get_ram_block(addr);
    }

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1);
    }
    return ramblock_ptr(block, addr - block->offset);
}

/* Return a host pointer to guest's ram.  Similar to qemu_get_ram_ptr
 * but takes a size argument.
 *
 * Called within RCU critical section.
 */
static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
                                 hwaddr *size)
{
    RAMBlock *block = ram_block;
    ram_addr_t offset_inside_block;

    if (*size == 0) {
        return NULL;
    }

    if (block == NULL) {
        block = qemu_get_ram_block(addr);
    }
    offset_inside_block = addr - block->offset;
    *size = MIN(*size, block->max_length - offset_inside_block);

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map the requested area.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, *size, 1);
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1);
    }

    return ramblock_ptr(block, offset_inside_block);
}

/*
 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
 * in that RAMBlock.
 *
 * ptr: Host pointer to look up
 * round_offset: If true round the result offset down to a page boundary
 * *ram_addr: set to result ram_addr
 * *offset: set to result offset within the RAMBlock
 *
 * Returns: RAMBlock (or NULL if not found)
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr_t.
 */
RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
                                   ram_addr_t *ram_addr,
                                   ram_addr_t *offset)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        rcu_read_lock();
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        block = qemu_get_ram_block(*ram_addr);
        if (block) {
            *offset = (host - block->host);
        }
        rcu_read_unlock();
        return block;
    }

    rcu_read_lock();
    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && block->host && host - block->host < block->max_length) {
        goto found;
    }

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->max_length) {
            goto found;
        }
    }

    rcu_read_unlock();
    return NULL;

found:
    *offset = (host - block->host);
    if (round_offset) {
        *offset &= TARGET_PAGE_MASK;
    }
    *ram_addr = block->offset + *offset;
    rcu_read_unlock();
    return block;
}
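
/*
 * Caller sketch (hypothetical; assumes ptr may or may not point into
 * guest RAM):
 *
 *     ram_addr_t ram_addr, offset;
 *     RAMBlock *rb = qemu_ram_block_from_host(ptr, true, &ram_addr, &offset);
 *     if (!rb) {
 *         // not guest RAM, e.g. a pointer into a bounce buffer
 *     }
 *
 * With round_offset=true, the offset is truncated to the page containing
 * ptr, which is the form the TLB code wants.
 */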

/*
 * Finds the named RAMBlock
 *
 * name: The name of RAMBlock to find
 *
 * Returns: RAMBlock (or NULL if not found)
 */
RAMBlock *qemu_ram_block_by_name(const char *name)
{
    RAMBlock *block;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (!strcmp(name, block->idstr)) {
            return block;
        }
    }

    return NULL;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    ram_addr_t offset; /* Not used */

    block = qemu_ram_block_from_host(ptr, false, ram_addr, &offset);

    if (!block) {
        return NULL;
    }

    return block->mr;
}

/* Called within RCU critical section.  */
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(NULL, ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(NULL, ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(NULL, ram_addr), val);
        break;
    default:
        abort();
    }
    /* Set both VGA and migration bits for simplicity and to remove
     * the notdirty callback faster.
     */
    cpu_physical_memory_set_dirty_range(ram_addr, size,
                                        DIRTY_CLIENTS_NOCODE);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
    }
}

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
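
/*
 * How this region is used (summary sketch): pages still clean for
 * DIRTY_MEMORY_CODE are entered into the TLB as io_mem_notdirty rather
 * than plain RAM, so the first guest write traps into
 * notdirty_mem_write() above; once the dirty bits are set and any TBs
 * on the page invalidated, tlb_set_dirty() re-enters the page as
 * ordinary RAM and later writes bypass this callback entirely.
 */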

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
{
    CPUState *cpu = current_cpu;
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState *env = cpu->env_ptr;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (cpu->watchpoint_hit) {
        /* We re-entered the check after replacing the TB.  Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, len)
            && (wp->flags & flags)) {
            if (flags == BP_MEM_READ) {
                wp->flags |= BP_WATCHPOINT_HIT_READ;
            } else {
                wp->flags |= BP_WATCHPOINT_HIT_WRITE;
            }
            wp->hitaddr = vaddr;
            wp->hitattrs = attrs;
            if (!cpu->watchpoint_hit) {
                if (wp->flags & BP_CPU &&
                    !cc->debug_check_watchpoint(cpu, wp)) {
                    wp->flags &= ~BP_WATCHPOINT_HIT;
                    continue;
                }
                cpu->watchpoint_hit = wp;
                tb_check_watchpoint(cpu);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    cpu->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(cpu);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(cpu, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
                                  unsigned size, MemTxAttrs attrs)
{
    MemTxResult res;
    uint64_t data;
    int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
    AddressSpace *as = current_cpu->cpu_ases[asidx].as;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
    switch (size) {
    case 1:
        data = address_space_ldub(as, addr, attrs, &res);
        break;
    case 2:
        data = address_space_lduw(as, addr, attrs, &res);
        break;
    case 4:
        data = address_space_ldl(as, addr, attrs, &res);
        break;
    default: abort();
    }
    *pdata = data;
    return res;
}

static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
                                   uint64_t val, unsigned size,
                                   MemTxAttrs attrs)
{
    MemTxResult res;
    int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
    AddressSpace *as = current_cpu->cpu_ases[asidx].as;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
    switch (size) {
    case 1:
        address_space_stb(as, addr, val, attrs, &res);
        break;
    case 2:
        address_space_stw(as, addr, val, attrs, &res);
        break;
    case 4:
        address_space_stl(as, addr, val, attrs, &res);
        break;
    default: abort();
    }
    return res;
}

static const MemoryRegionOps watch_mem_ops = {
    .read_with_attrs = watch_mem_read,
    .write_with_attrs = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
                                unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];
    MemTxResult res;

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    res = address_space_read(subpage->as, addr + subpage->base,
                             attrs, buf, len);
    if (res) {
        return res;
    }
    switch (len) {
    case 1:
        *data = ldub_p(buf);
        return MEMTX_OK;
    case 2:
        *data = lduw_p(buf);
        return MEMTX_OK;
    case 4:
        *data = ldl_p(buf);
        return MEMTX_OK;
    case 8:
        *data = ldq_p(buf);
        return MEMTX_OK;
    default:
        abort();
    }
}

static MemTxResult subpage_write(void *opaque, hwaddr addr,
                                 uint64_t value, unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    case 8:
        stq_p(buf, value);
        break;
    default:
        abort();
    }
    return address_space_write(subpage->as, addr + subpage->base,
                               attrs, buf, len);
}

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      len, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read_with_attrs = subpage_read,
    .write_with_attrs = subpage_write,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) {
        return -1;
    }
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          NULL, TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}
2313
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002314static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2315 MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02002316{
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002317 assert(as);
Avi Kivity5312bd82012-02-12 18:32:55 +02002318 MemoryRegionSection section = {
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002319 .address_space = as,
Avi Kivity5312bd82012-02-12 18:32:55 +02002320 .mr = mr,
2321 .offset_within_address_space = 0,
2322 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002323 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02002324 };
2325
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002326 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02002327}
2328
Peter Maydella54c87b2016-01-21 14:15:05 +00002329MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
Avi Kivityaa102232012-03-08 17:06:55 +02002330{
Peter Maydella54c87b2016-01-21 14:15:05 +00002331 int asidx = cpu_asidx_from_attrs(cpu, attrs);
2332 CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
Peter Maydell32857f42015-10-01 15:29:50 +01002333 AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002334 MemoryRegionSection *sections = d->map.sections;
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002335
2336 return sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02002337}
2338
Avi Kivitye9179ce2009-06-14 11:38:52 +03002339static void io_mem_init(void)
2340{
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002341 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002342 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002343 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002344 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002345 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002346 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002347 NULL, UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03002348}

static void mem_begin(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
    uint16_t n;

    n = dummy_section(&d->map, as, &io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&d->map, as, &io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&d->map, as, &io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&d->map, as, &io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
    d->as = as;
    as->next_dispatch = d;
}

static void address_space_dispatch_free(AddressSpaceDispatch *d)
{
    phys_sections_free(&d->map);
    g_free(d);
}

static void mem_commit(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    phys_page_compact_all(next, next->map.nodes_nb);

    atomic_rcu_set(&as->dispatch, next);
    if (cur) {
        call_rcu(cur, address_space_dispatch_free, rcu);
    }
}

static void tcg_commit(MemoryListener *listener)
{
    CPUAddressSpace *cpuas;
    AddressSpaceDispatch *d;

    /* Since each CPU stores ram addresses in its TLB cache, we must
     * reset the modified entries.
     */
    cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
    cpu_reloading_memory_map();
    /* The CPU and TLB are protected by the iothread lock.
     * We reload the dispatch pointer now because cpu_reloading_memory_map()
     * may have split the RCU critical section.
     */
    d = atomic_rcu_read(&cpuas->as->dispatch);
    cpuas->memory_dispatch = d;
    tlb_flush(cpuas->cpu, 1);
}

void address_space_init_dispatch(AddressSpace *as)
{
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .begin = mem_begin,
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    memory_listener_register(&as->dispatch_listener, as);
}

void address_space_unregister(AddressSpace *as)
{
    memory_listener_unregister(&as->dispatch_listener);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    atomic_rcu_set(&as->dispatch, NULL);
    if (d) {
        call_rcu(d, address_space_dispatch_free, rcu);
    }
}

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
                          65536);
    address_space_init(&address_space_io, system_io, "I/O");
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID)) {
            return -1;
        }
        if (is_write) {
            if (!(flags & PAGE_WRITE)) {
                return -1;
            }
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0))) {
                return -1;
            }
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ)) {
                return -1;
            }
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1))) {
                return -1;
            }
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else

static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
                                     hwaddr length)
{
    uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    /* No early return if dirty_log_mask is or becomes 0, because
     * cpu_physical_memory_set_dirty_range will still call
     * xen_modified_memory.
     */
    if (dirty_log_mask) {
        dirty_log_mask =
            cpu_physical_memory_range_includes_clean(addr, length,
                                                     dirty_log_mask);
    }
    if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_range(addr, addr + length);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
    }
    cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
}

static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
     * otherwise specified.
     */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    l = pow2floor(l);

    return l;
}
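
/*
 * Worked example for the alignment bound above (illustrative numbers):
 * for addr == 0x1006, addr & -addr == 2, so even if the region allows
 * 8-byte accesses a request of l == 8 is clamped to 2; the caller's loop
 * issues a 2-byte access and retries at 0x1008, which is 8-byte aligned.
 * pow2floor() then rounds any leftover down to a power of two, e.g.
 * l == 3 becomes 2.
 */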

static bool prepare_mmio_access(MemoryRegion *mr)
{
    bool unlocked = !qemu_mutex_iothread_locked();
    bool release_lock = false;

    if (unlocked && mr->global_locking) {
        qemu_mutex_lock_iothread();
        unlocked = false;
        release_lock = true;
    }
    if (mr->flush_coalesced_mmio) {
        if (unlocked) {
            qemu_mutex_lock_iothread();
        }
        qemu_flush_coalesced_mmio_buffer();
        if (unlocked) {
            qemu_mutex_unlock_iothread();
        }
    }

    return release_lock;
}
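
/*
 * Callers pair the return value with an unlock once the MMIO dispatch is
 * done; the pattern used throughout the accessors below is:
 *
 *     bool release_lock = false;
 *     ...
 *     release_lock |= prepare_mmio_access(mr);
 *     result |= memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
 *     ...
 *     if (release_lock) {
 *         qemu_mutex_unlock_iothread();
 *     }
 */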

/* Called within RCU critical section.  */
static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
                                                MemTxAttrs attrs,
                                                const uint8_t *buf,
                                                int len, hwaddr addr1,
                                                hwaddr l, MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, true)) {
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            /* XXX: could force current_cpu to NULL to avoid
               potential bugs */
            switch (l) {
            case 8:
                /* 64 bit write access */
                val = ldq_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 8,
                                                       attrs);
                break;
            case 4:
                /* 32 bit write access */
                val = ldl_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 4,
                                                       attrs);
                break;
            case 2:
                /* 16 bit write access */
                val = lduw_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 2,
                                                       attrs);
                break;
            case 1:
                /* 8 bit write access */
                val = ldub_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 1,
                                                       attrs);
                break;
            default:
                abort();
            }
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* RAM case */
            ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(mr, addr1, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
    }

    return result;
}

MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                                const uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
        result = address_space_write_continue(as, addr, attrs, buf, len,
                                              addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}

/* Called within RCU critical section.  */
MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
                                        MemTxAttrs attrs, uint8_t *buf,
                                        int len, hwaddr addr1, hwaddr l,
                                        MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, false)) {
            /* I/O case */
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            switch (l) {
            case 8:
                /* 64 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 8,
                                                      attrs);
                stq_p(buf, val);
                break;
            case 4:
                /* 32 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 4,
                                                      attrs);
                stl_p(buf, val);
                break;
            case 2:
                /* 16 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 2,
                                                      attrs);
                stw_p(buf, val);
                break;
            case 1:
                /* 8 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 1,
                                                      attrs);
                stb_p(buf, val);
                break;
            default:
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_get_ram_ptr(mr->ram_block,
                                   memory_region_get_ram_addr(mr) + addr1);
            memcpy(buf, ptr, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
    }

    return result;
}

MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs, uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
        result = address_space_read_continue(as, addr, attrs, buf, len,
                                             addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}

MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                             uint8_t *buf, int len, bool is_write)
{
    if (is_write) {
        return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
    } else {
        return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
    }
}
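
/*
 * Illustrative use of the MemTxResult-returning API (the address is
 * made up):
 *
 *     uint8_t data[4] = { 1, 2, 3, 4 };
 *     MemTxResult r = address_space_rw(&address_space_memory, 0x10000000,
 *                                      MEMTXATTRS_UNSPECIFIED, data,
 *                                      sizeof(data), true);
 *     if (r != MEMTX_OK) {
 *         ... the transaction faulted or hit an unassigned region ...
 *     }
 *
 * Callers that do not care about transaction failures can keep using
 * cpu_physical_memory_rw() below, which discards the result.
 */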

void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}

enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            l = memory_access_size(mr, l, addr1);
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();
}

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}

void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }

    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}

typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
    bool in_use;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    QEMUBH *bh;
    QLIST_ENTRY(MapClient) link;
} MapClient;

QemuMutex map_client_list_lock;
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

static void cpu_unregister_map_client_do(MapClient *client)
{
    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients_locked(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        qemu_bh_schedule(client->bh);
        cpu_unregister_map_client_do(client);
    }
}

void cpu_register_map_client(QEMUBH *bh)
{
    MapClient *client = g_malloc(sizeof(*client));

    qemu_mutex_lock(&map_client_list_lock);
    client->bh = bh;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    if (!atomic_read(&bounce.in_use)) {
        cpu_notify_map_clients_locked();
    }
    qemu_mutex_unlock(&map_client_list_lock);
}
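
/*
 * Sketch of the retry protocol (the dma_* names are illustrative, not
 * from this file): a caller whose address_space_map() returned NULL
 * queues a bottom half and retries the mapping from there once the
 * bounce buffer is free again:
 *
 *     mem = address_space_map(as, addr, &len, is_write);
 *     if (!mem) {
 *         cpu_register_map_client(dma->bh);
 *         return;
 *     }
 *
 * cpu_notify_map_clients_locked() above unregisters each client as it
 * schedules its bottom half, so a notified client must not call
 * cpu_unregister_map_client() again; only a caller that gives up before
 * being notified needs to.
 */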

void cpu_exec_init_all(void)
{
    qemu_mutex_init(&ram_list.mutex);
    io_mem_init();
    memory_map_init();
    qemu_mutex_init(&map_client_list_lock);
}

void cpu_unregister_map_client(QEMUBH *bh)
{
    MapClient *client;

    qemu_mutex_lock(&map_client_list_lock);
    QLIST_FOREACH(client, &map_client_list, link) {
        if (client->bh == bh) {
            cpu_unregister_map_client_do(client);
            break;
        }
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

static void cpu_notify_map_clients(void)
{
    qemu_mutex_lock(&map_client_list_lock);
    cpu_notify_map_clients_locked();
    qemu_mutex_unlock(&map_client_list_lock);
}

bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                /* Leave the RCU critical section before bailing out.  */
                rcu_read_unlock();
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    rcu_read_unlock();
    return true;
}

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;
    void *ptr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &l, is_write);

    if (!memory_access_is_direct(mr, is_write)) {
        if (atomic_xchg(&bounce.in_use, true)) {
            rcu_read_unlock();
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                               bounce.buffer, l);
        }

        rcu_read_unlock();
        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    *plen = done;
    ptr = qemu_ram_ptr_length(mr->ram_block, raddr + base, plen);
    rcu_read_unlock();

    return ptr;
}

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len,
                               is_write, access_len);
}
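
/*
 * Illustrative map/modify/unmap round trip (the size and address are
 * made up):
 *
 *     hwaddr len = size;
 *     void *p = cpu_physical_memory_map(addr, &len, 1);
 *     if (!p) {
 *         ... bounce buffer busy: register a map client and retry ...
 *     }
 *     memset(p, 0, len);    <- len may be smaller than the size asked for
 *     cpu_physical_memory_unmap(p, len, 1, len);
 *
 * The unmap step is what marks the memory dirty (and, for a bounce
 * buffer, what writes the data back), so it must not be skipped.
 */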

/* warning: addr must be aligned */
static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(mr->ram_block,
                               (memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
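
/*
 * Illustrative read of a little-endian 32-bit descriptor field with an
 * explicit transaction-failure check (desc_addr is made up):
 *
 *     MemTxResult r;
 *     uint32_t desc = address_space_ldl_le(as, desc_addr + 4,
 *                                          MEMTXATTRS_UNSPECIFIED, &r);
 *     if (r != MEMTX_OK) {
 *         ... raise a bus error instead of using desc ...
 *     }
 *
 * The ldl_phys() style wrappers above are for callers that ignore
 * transaction failures.
 */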

/* warning: addr must be aligned */
static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(mr->ram_block,
                               (memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* XXX: optimize */
uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &val, 1, 0);
    if (result) {
        *result = r;
    }
    return val;
}

uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline uint32_t address_space_lduw_internal(AddressSpace *as,
                                                   hwaddr addr,
                                                   MemTxAttrs attrs,
                                                   MemTxResult *result,
                                                   enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(mr->ram_block,
                               (memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_BIG_ENDIAN);
}

uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
 * and the code inside is not invalidated. It is useful if the dirty
 * bits are used to track modified PTEs.
 */
void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
                                MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    uint8_t dirty_log_mask;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
        stl_p(ptr, val);

        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
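
/*
 * The classic user of the _notdirty variant is target MMU emulation
 * writing accessed/dirty flags back into a guest page table entry,
 * along the lines of (illustrative; PG_ACCESSED_MASK is an x86 name):
 *
 *     pte |= PG_ACCESSED_MASK;
 *     stl_phys_notdirty(cs->as, pte_addr, pte);
 *
 * An ordinary stl_phys() here would mark the page-table page dirty and
 * defeat the PTE-modification tracking described above.
 */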
3414
bellard8df1cd02005-01-28 22:37:22 +00003415/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003416static inline void address_space_stl_internal(AddressSpace *as,
3417 hwaddr addr, uint32_t val,
3418 MemTxAttrs attrs,
3419 MemTxResult *result,
3420 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003421{
bellard8df1cd02005-01-28 22:37:22 +00003422 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003423 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003424 hwaddr l = 4;
3425 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003426 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003427 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003428
Paolo Bonzini41063e12015-03-18 14:21:43 +01003429 rcu_read_lock();
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003430 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003431 true);
3432 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003433 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003434
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003435#if defined(TARGET_WORDS_BIGENDIAN)
3436 if (endian == DEVICE_LITTLE_ENDIAN) {
3437 val = bswap32(val);
3438 }
3439#else
3440 if (endian == DEVICE_BIG_ENDIAN) {
3441 val = bswap32(val);
3442 }
3443#endif
Peter Maydell50013112015-04-26 16:49:24 +01003444 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003445 } else {
bellard8df1cd02005-01-28 22:37:22 +00003446 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003447 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
Gonglei3655cb92016-02-20 10:35:20 +08003448 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003449 switch (endian) {
3450 case DEVICE_LITTLE_ENDIAN:
3451 stl_le_p(ptr, val);
3452 break;
3453 case DEVICE_BIG_ENDIAN:
3454 stl_be_p(ptr, val);
3455 break;
3456 default:
3457 stl_p(ptr, val);
3458 break;
3459 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003460 invalidate_and_set_dirty(mr, addr1, 4);
Peter Maydell50013112015-04-26 16:49:24 +01003461 r = MEMTX_OK;
bellard8df1cd02005-01-28 22:37:22 +00003462 }
Peter Maydell50013112015-04-26 16:49:24 +01003463 if (result) {
3464 *result = r;
3465 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003466 if (release_lock) {
3467 qemu_mutex_unlock_iothread();
3468 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003469 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003470}
3471
3472void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3473 MemTxAttrs attrs, MemTxResult *result)
3474{
3475 address_space_stl_internal(as, addr, val, attrs, result,
3476 DEVICE_NATIVE_ENDIAN);
3477}
3478
3479void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3480 MemTxAttrs attrs, MemTxResult *result)
3481{
3482 address_space_stl_internal(as, addr, val, attrs, result,
3483 DEVICE_LITTLE_ENDIAN);
3484}
3485
3486void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3487 MemTxAttrs attrs, MemTxResult *result)
3488{
3489 address_space_stl_internal(as, addr, val, attrs, result,
3490 DEVICE_BIG_ENDIAN);
bellard8df1cd02005-01-28 22:37:22 +00003491}
3492
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003493void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003494{
Peter Maydell50013112015-04-26 16:49:24 +01003495 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003496}
3497
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003498void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003499{
Peter Maydell50013112015-04-26 16:49:24 +01003500 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003501}
3502
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003503void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003504{
Peter Maydell50013112015-04-26 16:49:24 +01003505 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003506}
3507
bellardaab33092005-10-30 20:48:42 +00003508/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003509void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3510 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003511{
3512 uint8_t v = val;
Peter Maydell50013112015-04-26 16:49:24 +01003513 MemTxResult r;
3514
3515 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3516 if (result) {
3517 *result = r;
3518 }
3519}
3520
3521void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3522{
3523 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003524}
3525
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003526/* warning: addr must be aligned */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

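        /* MMIO case: the memory core dispatches the write in the
         * target's native byte order, so byte-swap the value when the
         * requested endianness differs from it. */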
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case: store straight into the host buffer, then mark the
           page dirty so TCG and live migration see the update. */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

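/* 16-bit store entry points, mirroring the 32-bit family above. */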
void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

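/*
 * 64-bit stores have no direct RAM fast path yet: the value is
 * byte-swapped in place and pushed through the generic
 * address_space_rw() path.
 */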
/* XXX: optimize */
void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = tswap64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_le64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_be64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

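/*
 * Illustrative usage from a device model (desc_addr and status are
 * placeholder names):
 *
 *     stl_le_phys(&address_space_memory, desc_addr, status);
 *
 * stores a 32-bit value in little-endian order into guest memory,
 * independent of host and target byte order.
 */
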
/*
 * Virtual memory access for debug (gdbstub, monitor); unlike normal
 * guest accesses it is also allowed to write to ROM.
 */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        int asidx;
        MemTxAttrs attrs;

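        /* One page at a time: each page may be mapped through a
         * different address space for this CPU, so the address space
         * index is recomputed from the translation's memory attributes
         * on every iteration. */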
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
        asidx = cpu_asidx_from_attrs(cpu, attrs);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
                                          phys_addr, buf, l);
        } else {
            address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
                             MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

/*
 * Allows code that needs to deal with migration bitmaps etc to still be
 * built target independent.
 */
size_t qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}

#endif

/*
 * A helper function for the _utterly broken_ virtio device model to find
 * out if it's running on a big endian machine. Don't do this at home kids!
 */
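/* The prototype is declared locally rather than in a header because the
 * answer is target-dependent and this file is compiled once per target. */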
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

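    /* Anything that is neither RAM nor a ROM device is treated as I/O. */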
    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}

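/*
 * Invoke func for every RAMBlock, stopping early if it returns a
 * non-zero value; the block list is walked under the RCU read lock.
 */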
int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);
        if (ret) {
            break;
        }
    }
    rcu_read_unlock();
    return ret;
}
#endif