/*
 *  Virtual page mapping
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#ifndef _WIN32
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "hw/qdev.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "exec/log.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits to skip to the next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)
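
/* skip:6 + ptr:26 fill exactly 32 bits; PHYS_MAP_NODE_NIL is the
 * all-ones value of the 26-bit ptr field (((uint32_t)~0) >> 6 ==
 * 0x03ffffff), i.e. the one ptr value that phys_map_node_alloc()
 * below asserts it never hands out for a real node.
 */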

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
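
/* Worked example: with ADDR_SPACE_BITS == 64, P_L2_BITS == 9 and a
 * 4 KiB target page (TARGET_PAGE_BITS == 12), this evaluates to
 * ((64 - 12 - 1) / 9) + 1 == 6, i.e. six 512-entry levels covering
 * the 52-bit page frame number.
 */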

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3
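
/* These constants are fixed slots in PhysPageMap.sections; dispatch
 * setup is expected to register the four special sections first and
 * in this order, so that the values can be ORed directly into iotlb
 * entries (see memory_region_section_get_iotlb() below).
 */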

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
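
/* Example: a range of nb == P_L2_SIZE pages whose index is
 * P_L2_SIZE-aligned is installed as a single level-1 entry with
 * skip == 0; smaller or unaligned ranges recurse down to level 0
 * and fill individual leaf entries instead.
 */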

/* Compact a non-leaf page entry.  Simply detect that the entry has a
 * single child, and update our entry so we can skip it and go directly
 * to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}
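
/* Compression example: a chain of single-child interior nodes with
 * skip == 1 each collapses into one entry whose skip is the sum of
 * the chain, so phys_page_find() below can cross several radix
 * levels in a single step.
 */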

static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return section->size.hi ||
           range_covers_byte(section->offset_within_address_space,
                             section->size.lo, addr);
}

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
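
/* The walk above takes at most P_L2_LEVELS iterations: i starts past
 * the top level and drops by lp.skip each time, so a compacted entry
 * consumes several levels at once.  Hitting PHYS_MAP_NODE_NIL on the
 * way down means the range was never populated.
 */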

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = atomic_read(&d->mru_section);
    subpage_t *subpage;
    bool update;

    if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
        section_covers_addr(section, addr)) {
        update = false;
    } else {
        section = phys_page_find(d->phys_map, addr, d->map.nodes,
                                 d->map.sections);
        update = true;
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    if (update) {
        atomic_set(&d->mru_section, section);
    }
    return section;
}
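
/* mru_section is a one-entry cache that lets repeated lookups in the
 * same section skip the radix walk.  It is read and published with
 * atomic_read()/atomic_set() and only ever points into
 * d->map.sections, which stays alive for the duration of the
 * caller's RCU critical section.
 */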

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}
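
/* Example: with one guest IOMMU in front of RAM, the loop above runs
 * twice: the first pass resolves the IOMMU MemoryRegion and calls its
 * translate() hook, the second resolves iotlb.target_as to the
 * terminal RAM or MMIO region.  A permission miss short-circuits to
 * io_mem_unassigned instead.
 */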

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
#endif

#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

static int cpu_get_free_index(Error **errp)
{
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
                   MAX_CPUMASK_BITS);
        return -1;
    }

    bitmap_set(cpu_index_map, cpu, 1);
    return cpu;
}

void cpu_exec_exit(CPUState *cpu)
{
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
        return;
    }

    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
    cpu->cpu_index = -1;
}
#else

static int cpu_get_free_index(Error **errp)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

void cpu_exec_exit(CPUState *cpu)
{
}
#endif

void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int cpu_index;
    Error *local_err = NULL;

    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();

    /* This is a softmmu CPU object, so create a property for it
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
                             (Object **)&cpu->memory,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));
#endif

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    MemTxAttrs attrs;
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
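
/* Wrap-around example: a 1-byte watchpoint on the very last byte of
 * the address space has wp->vaddr + wp->len == 0, which is why the
 * overlap test above compares inclusive end addresses (wpend and
 * addrend) rather than exclusive ones.
 */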

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *                  rcu_read_lock()
     *                  read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *                  rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    bool dirty = false;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
                                              offset, num);
        page += num;
    }

    rcu_read_unlock();

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}
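
/* The dirty bitmap is sharded into DIRTY_MEMORY_BLOCK_SIZE-page
 * blocks that are looked up under RCU.  Example: a range straddling
 * a block boundary is cleared in two iterations of the loop above,
 * first the tail of blocks[idx] (num == DIRTY_MEMORY_BLOCK_SIZE -
 * offset), then from bit 0 of blocks[idx + 1].
 */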

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
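
/* iotlb encoding: for RAM the value is the page-aligned ram_addr plus
 * xlat, ORed with one of the small PHYS_SECTION_* indices; for
 * everything else it is the section's index into d->map.sections
 * plus the offset.  The OR trick is safe because phys_section_add()
 * below asserts that fewer than TARGET_PAGE_SIZE sections exist.
 */
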
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
1088 * Accelerators with unusual needs may need this. Hopefully, we can
1089 * get rid of it eventually.
1090 */
Igor Mammedova2b257d2014-10-31 16:38:37 +00001091void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
Markus Armbruster91138032013-07-31 15:11:08 +02001092{
1093 phys_mem_alloc = alloc;
1094}
1095
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001096static uint16_t phys_section_add(PhysPageMap *map,
1097 MemoryRegionSection *section)
Avi Kivity5312bd82012-02-12 18:32:55 +02001098{
Paolo Bonzini68f3f652013-05-07 11:30:23 +02001099 /* The physical section number is ORed with a page-aligned
1100 * pointer to produce the iotlb entries. Thus it should
1101 * never overflow into the page-aligned value.
1102 */
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001103 assert(map->sections_nb < TARGET_PAGE_SIZE);
Paolo Bonzini68f3f652013-05-07 11:30:23 +02001104
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001105 if (map->sections_nb == map->sections_nb_alloc) {
1106 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
1107 map->sections = g_renew(MemoryRegionSection, map->sections,
1108 map->sections_nb_alloc);
Avi Kivity5312bd82012-02-12 18:32:55 +02001109 }
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001110 map->sections[map->sections_nb] = *section;
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02001111 memory_region_ref(section->mr);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001112 return map->sections_nb++;
Avi Kivity5312bd82012-02-12 18:32:55 +02001113}
1114
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001115static void phys_section_destroy(MemoryRegion *mr)
1116{
Don Slutz55b4e802015-11-30 17:11:04 -05001117 bool have_sub_page = mr->subpage;
1118
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02001119 memory_region_unref(mr);
1120
Don Slutz55b4e802015-11-30 17:11:04 -05001121 if (have_sub_page) {
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001122 subpage_t *subpage = container_of(mr, subpage_t, iomem);
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001123 object_unref(OBJECT(&subpage->iomem));
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001124 g_free(subpage);
1125 }
1126}
1127
Paolo Bonzini60926662013-05-29 12:30:26 +02001128static void phys_sections_free(PhysPageMap *map)
Avi Kivity5312bd82012-02-12 18:32:55 +02001129{
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001130 while (map->sections_nb > 0) {
1131 MemoryRegionSection *section = &map->sections[--map->sections_nb];
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001132 phys_section_destroy(section->mr);
1133 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001134 g_free(map->sections);
1135 g_free(map->nodes);
Avi Kivity5312bd82012-02-12 18:32:55 +02001136}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}

static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}
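
/*
 * Worked example (illustration only, assuming 4 KiB target pages):
 * a section covering [0x1800, 0x5800) is registered as
 *   - a subpage for [0x1800, 0x2000)    (unaligned head),
 *   - a multipage for [0x2000, 0x5000)  (whole pages),
 *   - a subpage for [0x5000, 0x5800)    (unaligned tail),
 * which is exactly the head/body/tail split performed by the loop above.
 */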

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path,
                            Error **errp)
{
    bool unlink_on_error = false;
    char *filename;
    char *sanitized_name;
    char *c;
    void *area;
    int fd = -1;
    int64_t page_size;

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        error_setg(errp,
                   "host lacks kvm mmu notifiers, -mem-path unsupported");
        return NULL;
    }

    for (;;) {
        fd = open(path, O_RDWR);
        if (fd >= 0) {
            /* @path names an existing file, use it */
            break;
        }
        if (errno == ENOENT) {
            /* @path names a file that doesn't exist, create it */
            fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
            if (fd >= 0) {
                unlink_on_error = true;
                break;
            }
        } else if (errno == EISDIR) {
            /* @path names a directory, create a file there */
            /* Make name safe to use with mkstemp by replacing '/' with '_'. */
            sanitized_name = g_strdup(memory_region_name(block->mr));
            for (c = sanitized_name; *c != '\0'; c++) {
                if (*c == '/') {
                    *c = '_';
                }
            }

            filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                                       sanitized_name);
            g_free(sanitized_name);

            fd = mkstemp(filename);
            if (fd >= 0) {
                unlink(filename);
                g_free(filename);
                break;
            }
            g_free(filename);
        }
        if (errno != EEXIST && errno != EINTR) {
            error_setg_errno(errp, errno,
                             "can't open backing store %s for guest RAM",
                             path);
            goto error;
        }
        /*
         * Try again on EINTR and EEXIST.  The latter happens when
         * something else creates the file between our two open().
         */
    }

    page_size = qemu_fd_getpagesize(fd);
    block->mr->align = page_size;

    if (memory < page_size) {
        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
                   "or larger than page size 0x%" PRIx64,
                   memory, page_size);
        goto error;
    }

    memory = ROUND_UP(memory, page_size);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory)) {
        perror("ftruncate");
    }

    area = qemu_ram_mmap(fd, memory, page_size, block->flags & RAM_SHARED);
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for guest RAM");
        goto error;
    }

    if (mem_prealloc) {
        os_mem_prealloc(fd, area, memory);
    }

    block->fd = fd;
    return area;

error:
    if (unlink_on_error) {
        unlink(path);
    }
    if (fd != -1) {
        close(fd);
    }
    return NULL;
}
#endif
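
/*
 * Usage sketch (not part of this file): file_ram_alloc() is what backs a
 * command line such as
 *
 *     qemu-system-x86_64 -m 2G -mem-path /dev/hugepages
 *
 * where @path may name a directory (a unique file is created there via
 * mkstemp() and unlinked immediately), an existing file, or a file to be
 * created.  The hugepage example assumes a mounted hugetlbfs.
 */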

/* Called with the ramlist lock held.  */
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
        return 0;
    }

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->max_length;

        QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
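
/*
 * Illustration only: this is a best-fit search over all gaps between
 * blocks, O(n^2) in the block count.  With blocks at [0, 1M) and
 * [3M, 4M), a 1M request returns offset 1M: the 2M hole there is the
 * smallest gap that fits, which keeps ram_addr_t space compact.  n is
 * small in practice, so the quadratic scan is fine.
 */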

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last = MAX(last, block->offset + block->max_length);
    }
    rcu_read_unlock();
    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    if (!machine_dump_guest_core(current_machine)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
        }
    }
}

/* Called within an RCU critical section, or while the ramlist lock
 * is held.
 */
static RAMBlock *find_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            return block;
        }
    }

    return NULL;
}

const char *qemu_ram_get_idstr(RAMBlock *rb)
{
    return rb->idstr;
}

/* Called with iothread lock held.  */
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    rcu_read_lock();
    new_block = find_ram_block(addr);
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
    rcu_read_unlock();
}

/* Called with iothread lock held.  */
void qemu_ram_unset_idstr(ram_addr_t addr)
{
    RAMBlock *block;

    /* FIXME: arch_init.c assumes that this is not called throughout
     * migration.  Ignore the problem since hot-unplug during migration
     * does not work anyway.
     */

    rcu_read_lock();
    block = find_ram_block(addr);
    if (block) {
        memset(block->idstr, 0, sizeof(block->idstr));
    }
    rcu_read_unlock();
}

static int memory_try_enable_merging(void *addr, size_t len)
{
    if (!machine_mem_merge(current_machine)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

/* Only legal before guest might have detected the memory size: e.g. on
 * incoming migration, or right after reset.
 *
 * As the memory core doesn't know how memory is accessed, it is up to the
 * resize callback to update device state and/or add assertions to detect
 * misuse, if necessary.
 */
int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
{
    RAMBlock *block = find_ram_block(base);

    assert(block);

    newsize = HOST_PAGE_ALIGN(newsize);

    if (block->used_length == newsize) {
        return 0;
    }

    if (!(block->flags & RAM_RESIZEABLE)) {
        error_setg_errno(errp, EINVAL,
                         "Length mismatch: %s: 0x" RAM_ADDR_FMT
                         " in != 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->used_length);
        return -EINVAL;
    }

    if (block->max_length < newsize) {
        error_setg_errno(errp, EINVAL,
                         "Length too large: %s: 0x" RAM_ADDR_FMT
                         " > 0x" RAM_ADDR_FMT, block->idstr,
                         newsize, block->max_length);
        return -EINVAL;
    }

    cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
    block->used_length = newsize;
    cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
                                        DIRTY_CLIENTS_ALL);
    memory_region_set_size(block->mr, newsize);
    if (block->resized) {
        block->resized(block->idstr, newsize, block->host);
    }
    return 0;
}
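
/*
 * Hypothetical caller sketch (names are illustrative): only blocks
 * created with RAM_RESIZEABLE accept a new size, and only up to their
 * max_length:
 *
 *     Error *err = NULL;
 *     if (qemu_ram_resize(block->offset, 128 * 1024 * 1024, &err) < 0) {
 *         error_report_err(err);
 *     }
 *
 * where block is assumed to come from qemu_ram_alloc_resizeable().
 */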

/* Called with ram_list.mutex held */
static void dirty_memory_extend(ram_addr_t old_ram_size,
                                ram_addr_t new_ram_size)
{
    ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
                                             DIRTY_MEMORY_BLOCK_SIZE);
    ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
                                             DIRTY_MEMORY_BLOCK_SIZE);
    int i;

    /* Only need to extend if block count increased */
    if (new_num_blocks <= old_num_blocks) {
        return;
    }

    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
        DirtyMemoryBlocks *old_blocks;
        DirtyMemoryBlocks *new_blocks;
        int j;

        old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
        new_blocks = g_malloc(sizeof(*new_blocks) +
                              sizeof(new_blocks->blocks[0]) * new_num_blocks);

        if (old_num_blocks) {
            memcpy(new_blocks->blocks, old_blocks->blocks,
                   old_num_blocks * sizeof(old_blocks->blocks[0]));
        }

        for (j = old_num_blocks; j < new_num_blocks; j++) {
            new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
        }

        atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);

        if (old_blocks) {
            g_free_rcu(old_blocks, rcu);
        }
    }
}
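
/*
 * Explanatory note (not from the original): the function above is classic
 * RCU copy-and-publish.  Readers do
 *
 *     rcu_read_lock();
 *     DirtyMemoryBlocks *blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
 *     ... use blocks->blocks[...] ...
 *     rcu_read_unlock();
 *
 * so the old array must stay valid until every reader is done, hence
 * g_free_rcu() instead of g_free().  The bitmaps themselves are shared
 * between the old and new arrays; only the pointer array is copied.
 */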

static void ram_block_add(RAMBlock *new_block, Error **errp)
{
    RAMBlock *block;
    RAMBlock *last_block = NULL;
    ram_addr_t old_ram_size, new_ram_size;
    Error *err = NULL;

    old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(new_block->max_length);

    if (!new_block->host) {
        if (xen_enabled()) {
            xen_ram_alloc(new_block->offset, new_block->max_length,
                          new_block->mr, &err);
            if (err) {
                error_propagate(errp, err);
                qemu_mutex_unlock_ramlist();
                return;
            }
        } else {
            new_block->host = phys_mem_alloc(new_block->max_length,
                                             &new_block->mr->align);
            if (!new_block->host) {
                error_setg_errno(errp, errno,
                                 "cannot set up guest memory '%s'",
                                 memory_region_name(new_block->mr));
                qemu_mutex_unlock_ramlist();
                return;
            }
            memory_try_enable_merging(new_block->host, new_block->max_length);
        }
    }

    new_ram_size = MAX(old_ram_size,
                       (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
    if (new_ram_size > old_ram_size) {
        migration_bitmap_extend(old_ram_size, new_ram_size);
        dirty_memory_extend(old_ram_size, new_ram_size);
    }
    /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
     * QLIST (which has an RCU-friendly variant) does not have insertion at
     * tail, so save the last element in last_block.
     */
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last_block = block;
        if (block->max_length < new_block->max_length) {
            break;
        }
    }
    if (block) {
        QLIST_INSERT_BEFORE_RCU(block, new_block, next);
    } else if (last_block) {
        QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
    } else { /* list is empty */
        QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    cpu_physical_memory_set_dirty_range(new_block->offset,
                                        new_block->used_length,
                                        DIRTY_CLIENTS_ALL);

    if (new_block->host) {
        qemu_ram_setup_dump(new_block->host, new_block->max_length);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
        if (kvm_enabled()) {
            kvm_setup_guest_memory(new_block->host, new_block->max_length);
        }
    }
}
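
/*
 * Illustration only: keeping ram_list.blocks sorted biggest-first means
 * the linear scans in the lookup helpers below tend to hit the guest's
 * main RAM (usually the largest block) early, and the mru_block cache
 * catches repeated lookups.  Correctness never depends on the ordering.
 */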

#ifdef __linux__
RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                   bool share, const char *mem_path,
                                   Error **errp)
{
    RAMBlock *new_block;
    Error *local_err = NULL;

    if (xen_enabled()) {
        error_setg(errp, "-mem-path not supported with Xen");
        return NULL;
    }

    if (phys_mem_alloc != qemu_anon_ram_alloc) {
        /*
         * file_ram_alloc() needs to allocate just like
         * phys_mem_alloc, but we haven't bothered to provide
         * a hook there.
         */
        error_setg(errp,
                   "-mem-path not supported with this accelerator");
        return NULL;
    }

    size = HOST_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->used_length = size;
    new_block->max_length = size;
    new_block->flags = share ? RAM_SHARED : 0;
    new_block->host = file_ram_alloc(new_block, size,
                                     mem_path, errp);
    if (!new_block->host) {
        g_free(new_block);
        return NULL;
    }

    ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return NULL;
    }
    return new_block;
}
#endif

static
RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
                                  void (*resized)(const char*,
                                                  uint64_t length,
                                                  void *host),
                                  void *host, bool resizeable,
                                  MemoryRegion *mr, Error **errp)
{
    RAMBlock *new_block;
    Error *local_err = NULL;

    size = HOST_PAGE_ALIGN(size);
    max_size = HOST_PAGE_ALIGN(max_size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->resized = resized;
    new_block->used_length = size;
    new_block->max_length = max_size;
    assert(max_size >= size);
    new_block->fd = -1;
    new_block->host = host;
    if (host) {
        new_block->flags |= RAM_PREALLOC;
    }
    if (resizeable) {
        new_block->flags |= RAM_RESIZEABLE;
    }
    ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return NULL;
    }
    return new_block;
}

RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                  MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
}

RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
}

RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
                                    void (*resized)(const char*,
                                                    uint64_t length,
                                                    void *host),
                                    MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
}
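
/*
 * Usage sketch (hypothetical, for illustration): the public wrappers
 * above differ only in where the memory comes from:
 *
 *     rb = qemu_ram_alloc(size, mr, &err);               // anonymous alloc
 *     rb = qemu_ram_alloc_from_ptr(size, buf, mr, &err); // caller's buffer
 *     rb = qemu_ram_alloc_resizeable(size, maxsz,
 *                                    resized_cb, mr, &err); // grows to maxsz
 *
 * All of them funnel into qemu_ram_alloc_internal() and then
 * ram_block_add().
 */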

static void reclaim_ramblock(RAMBlock *block)
{
    if (block->flags & RAM_PREALLOC) {
        ;
    } else if (xen_enabled()) {
        xen_invalidate_map_cache_entry(block->host);
#ifndef _WIN32
    } else if (block->fd >= 0) {
        qemu_ram_munmap(block->host, block->max_length);
        close(block->fd);
#endif
    } else {
        qemu_anon_ram_free(block->host, block->max_length);
    }
    g_free(block);
}

void qemu_ram_free(RAMBlock *block)
{
    qemu_mutex_lock_ramlist();
    QLIST_REMOVE_RCU(block, next);
    ram_list.mru_block = NULL;
    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    call_rcu(block, reclaim_ramblock, rcu);
    qemu_mutex_unlock_ramlist();
}
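
/*
 * Explanatory note (not from the original): the block is unlinked from
 * the list immediately, but its host memory is reclaimed only after an
 * RCU grace period via call_rcu(), so a concurrent reader that found the
 * block under rcu_read_lock() can keep dereferencing it safely until it
 * leaves its critical section.
 */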

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->max_length) {
            vaddr = ramblock_ptr(block, offset);
            if (block->flags & RAM_PREALLOC) {
                ;
            } else if (xen_enabled()) {
                abort();
            } else {
                flags = MAP_FIXED;
                if (block->fd >= 0) {
                    flags |= (block->flags & RAM_SHARED ?
                              MAP_SHARED : MAP_PRIVATE);
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                } else {
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
        }
    }
}
#endif /* !_WIN32 */

int qemu_get_ram_fd(ram_addr_t addr)
{
    RAMBlock *block;
    int fd;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);
    fd = block->fd;
    rcu_read_unlock();
    return fd;
}

void qemu_set_ram_fd(ram_addr_t addr, int fd)
{
    RAMBlock *block;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);
    block->fd = fd;
    rcu_read_unlock();
}

void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
{
    RAMBlock *block;
    void *ptr;

    rcu_read_lock();
    block = qemu_get_ram_block(addr);
    ptr = ramblock_ptr(block, 0);
    rcu_read_unlock();
    return ptr;
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * This should not be used for general purpose DMA.  Use address_space_map
 * or address_space_rw instead. For local memory (e.g. video ram) that the
 * device owns, use memory_region_get_ram_ptr.
 *
 * Called within RCU critical section.
 */
void *qemu_get_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
{
    RAMBlock *block = ram_block;

    if (block == NULL) {
        block = qemu_get_ram_block(addr);
    }

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1);
    }
    return ramblock_ptr(block, addr - block->offset);
}

/* Return a host pointer to guest's ram.  Similar to qemu_get_ram_ptr
 * but takes a size argument.
 *
 * Called within RCU critical section.
 */
static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
                                 hwaddr *size)
{
    RAMBlock *block = ram_block;
    ram_addr_t offset_inside_block;

    if (*size == 0) {
        return NULL;
    }

    if (block == NULL) {
        block = qemu_get_ram_block(addr);
    }
    offset_inside_block = addr - block->offset;
    *size = MIN(*size, block->max_length - offset_inside_block);

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map the requested area.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, *size, 1);
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1);
    }

    return ramblock_ptr(block, offset_inside_block);
}

/*
 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
 * in that RAMBlock.
 *
 * ptr: Host pointer to look up
 * round_offset: If true round the result offset down to a page boundary
 * *ram_addr: set to result ram_addr
 * *offset: set to result offset within the RAMBlock
 *
 * Returns: RAMBlock (or NULL if not found)
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr_t.
 */
RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
                                   ram_addr_t *ram_addr,
                                   ram_addr_t *offset)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        rcu_read_lock();
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        block = qemu_get_ram_block(*ram_addr);
        if (block) {
            *offset = (host - block->host);
        }
        rcu_read_unlock();
        return block;
    }

    rcu_read_lock();
    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && block->host && host - block->host < block->max_length) {
        goto found;
    }

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->max_length) {
            goto found;
        }
    }

    rcu_read_unlock();
    return NULL;

found:
    *offset = (host - block->host);
    if (round_offset) {
        *offset &= TARGET_PAGE_MASK;
    }
    *ram_addr = block->offset + *offset;
    rcu_read_unlock();
    return block;
}
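
/*
 * Hypothetical usage sketch: reverse-mapping a host pointer that came
 * from guest RAM, e.g. inside a TCG or vhost helper:
 *
 *     ram_addr_t ram_addr, offset;
 *     RAMBlock *rb = qemu_ram_block_from_host(host_ptr, true,
 *                                             &ram_addr, &offset);
 *     if (rb) {
 *         // offset was rounded down to a page boundary (round_offset)
 *     }
 *
 * host_ptr is an assumed name for a pointer previously obtained from
 * qemu_get_ram_ptr() or address_space_map().
 */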

/*
 * Finds the named RAMBlock
 *
 * name: The name of RAMBlock to find
 *
 * Returns: RAMBlock (or NULL if not found)
 */
RAMBlock *qemu_ram_block_by_name(const char *name)
{
    RAMBlock *block;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (!strcmp(name, block->idstr)) {
            return block;
        }
    }

    return NULL;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    ram_addr_t offset; /* Not used */

    block = qemu_ram_block_from_host(ptr, false, ram_addr, &offset);

    if (!block) {
        return NULL;
    }

    return block->mr;
}

/* Called within RCU critical section.  */
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(NULL, ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(NULL, ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(NULL, ram_addr), val);
        break;
    default:
        abort();
    }
    /* Set both VGA and migration bits for simplicity and to remove
     * the notdirty callback faster.
     */
    cpu_physical_memory_set_dirty_range(ram_addr, size,
                                        DIRTY_CLIENTS_NOCODE);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
    }
}

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
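
/*
 * Explanatory note (not from the original): pages still "clean" for
 * DIRTY_MEMORY_CODE are mapped through this region instead of RAM proper,
 * so the first write lands in notdirty_mem_write(), which invalidates any
 * TBs translated from the page, performs the store, marks the page dirty,
 * and finally flips the TLB entry back to a direct RAM mapping so later
 * writes take the fast path.
 */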

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
{
    CPUState *cpu = current_cpu;
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState *env = cpu->env_ptr;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (cpu->watchpoint_hit) {
        /* We re-entered the check after replacing the TB.  Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, len)
            && (wp->flags & flags)) {
            if (flags == BP_MEM_READ) {
                wp->flags |= BP_WATCHPOINT_HIT_READ;
            } else {
                wp->flags |= BP_WATCHPOINT_HIT_WRITE;
            }
            wp->hitaddr = vaddr;
            wp->hitattrs = attrs;
            if (!cpu->watchpoint_hit) {
                if (wp->flags & BP_CPU &&
                    !cc->debug_check_watchpoint(cpu, wp)) {
                    wp->flags &= ~BP_WATCHPOINT_HIT;
                    continue;
                }
                cpu->watchpoint_hit = wp;
                tb_check_watchpoint(cpu);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    cpu->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(cpu);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(cpu, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
                                  unsigned size, MemTxAttrs attrs)
{
    MemTxResult res;
    uint64_t data;
    int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
    AddressSpace *as = current_cpu->cpu_ases[asidx].as;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
    switch (size) {
    case 1:
        data = address_space_ldub(as, addr, attrs, &res);
        break;
    case 2:
        data = address_space_lduw(as, addr, attrs, &res);
        break;
    case 4:
        data = address_space_ldl(as, addr, attrs, &res);
        break;
    default: abort();
    }
    *pdata = data;
    return res;
}

static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
                                   uint64_t val, unsigned size,
                                   MemTxAttrs attrs)
{
    MemTxResult res;
    int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
    AddressSpace *as = current_cpu->cpu_ases[asidx].as;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
    switch (size) {
    case 1:
        address_space_stb(as, addr, val, attrs, &res);
        break;
    case 2:
        address_space_stw(as, addr, val, attrs, &res);
        break;
    case 4:
        address_space_stl(as, addr, val, attrs, &res);
        break;
    default: abort();
    }
    return res;
}

static const MemoryRegionOps watch_mem_ops = {
    .read_with_attrs = watch_mem_read,
    .write_with_attrs = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
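
/*
 * Explanatory note (an assumption about surrounding code not shown here):
 * a page with an active watchpoint is entered into the TLB with
 * io_mem_watch as its backing region, so every load/store to that page
 * funnels through watch_mem_read()/watch_mem_write() above.
 * check_watchpoint() either raises EXCP_DEBUG or returns, after which the
 * access is replayed against the real address space with the
 * address_space_* helpers.
 */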

static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
                                unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];
    MemTxResult res;

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    res = address_space_read(subpage->as, addr + subpage->base,
                             attrs, buf, len);
    if (res) {
        return res;
    }
    switch (len) {
    case 1:
        *data = ldub_p(buf);
        return MEMTX_OK;
    case 2:
        *data = lduw_p(buf);
        return MEMTX_OK;
    case 4:
        *data = ldl_p(buf);
        return MEMTX_OK;
    case 8:
        *data = ldq_p(buf);
        return MEMTX_OK;
    default:
        abort();
    }
}

static MemTxResult subpage_write(void *opaque, hwaddr addr,
                                 uint64_t value, unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    case 8:
        stq_p(buf, value);
        break;
    default:
        abort();
    }
    return address_space_write(subpage->as, addr + subpage->base,
                               attrs, buf, len);
}

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      len, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read_with_attrs = subpage_read,
    .write_with_attrs = subpage_write,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
2271
Anthony Liguoric227f092009-10-01 16:12:16 -05002272static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002273 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00002274{
2275 int idx, eidx;
2276
2277 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2278 return -1;
2279 idx = SUBPAGE_IDX(start);
2280 eidx = SUBPAGE_IDX(end);
2281#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002282 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2283 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00002284#endif
blueswir1db7b5422007-05-26 17:36:03 +00002285 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02002286 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00002287 }
2288
2289 return 0;
2290}
2291
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002292static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00002293{
Anthony Liguoric227f092009-10-01 16:12:16 -05002294 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00002295
Anthony Liguori7267c092011-08-20 22:09:37 -05002296 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00002297
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002298 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00002299 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002300 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002301 NULL, TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02002302 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00002303#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002304 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2305 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00002306#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02002307 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00002308
2309 return mmio;
2310}
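
/*
 * Illustrative sketch (comment only, not compiled as part of this file):
 * how a dispatch builder might carve one page between two
 * MemoryRegionSections. 'section_a'/'section_b' are assumed section
 * indices and 0x100 an assumed split offset within the page:
 *
 *     subpage_t *sub = subpage_init(as, page_base);
 *     subpage_register(sub, 0x000, 0x0ff, section_a);
 *     subpage_register(sub, 0x100, TARGET_PAGE_SIZE - 1, section_b);
 *
 * Accesses to the page then funnel through subpage_ops, which forwards
 * them to whichever section covers the faulting offset.
 */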

static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
                              MemoryRegion *mr)
{
    assert(as);
    MemoryRegionSection section = {
        .address_space = as,
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(map, &section);
}

MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
{
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
    AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
    MemoryRegionSection *sections = d->map.sections;

    return sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          NULL, UINT64_MAX);
}

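/*
 * mem_begin()/mem_commit() implement a two-phase rebuild of an address
 * space's dispatch structures: mem_begin() allocates a fresh
 * AddressSpaceDispatch and pre-registers the four fixed sections that the
 * TLB fast path relies on; the memory core then populates it through the
 * region_add/region_nop callbacks; mem_commit() finally publishes it with
 * atomic_rcu_set() and defers freeing the old tree until all RCU readers
 * have moved on.
 */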
static void mem_begin(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
    uint16_t n;

    n = dummy_section(&d->map, as, &io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&d->map, as, &io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&d->map, as, &io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&d->map, as, &io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
    d->as = as;
    as->next_dispatch = d;
}

static void address_space_dispatch_free(AddressSpaceDispatch *d)
{
    phys_sections_free(&d->map);
    g_free(d);
}

static void mem_commit(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    phys_page_compact_all(next, next->map.nodes_nb);

    atomic_rcu_set(&as->dispatch, next);
    if (cur) {
        call_rcu(cur, address_space_dispatch_free, rcu);
    }
}

static void tcg_commit(MemoryListener *listener)
{
    CPUAddressSpace *cpuas;
    AddressSpaceDispatch *d;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
    cpu_reloading_memory_map();
    /* The CPU and TLB are protected by the iothread lock.
     * We reload the dispatch pointer now because cpu_reloading_memory_map()
     * may have split the RCU critical section.
     */
    d = atomic_rcu_read(&cpuas->as->dispatch);
    cpuas->memory_dispatch = d;
    tlb_flush(cpuas->cpu, 1);
}

void address_space_init_dispatch(AddressSpace *as)
{
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .begin = mem_begin,
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    memory_listener_register(&as->dispatch_listener, as);
}

void address_space_unregister(AddressSpace *as)
{
    memory_listener_unregister(&as->dispatch_listener);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    atomic_rcu_set(&as->dispatch, NULL);
    if (d) {
        call_rcu(d, address_space_dispatch_free, rcu);
    }
}

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
                          65536);
    address_space_init(&address_space_io, system_io, "I/O");
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID)) {
            return -1;
        }
        if (is_write) {
            if (!(flags & PAGE_WRITE)) {
                return -1;
            }
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0))) {
                return -1;
            }
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ)) {
                return -1;
            }
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1))) {
                return -1;
            }
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else

static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
                                     hwaddr length)
{
    uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    /* No early return if dirty_log_mask is or becomes 0, because
     * cpu_physical_memory_set_dirty_range will still call
     * xen_modified_memory.
     */
    if (dirty_log_mask) {
        dirty_log_mask =
            cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
    }
    if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_range(addr, addr + length);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
    }
    cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
}

static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    l = pow2floor(l);

    return l;
}
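
/*
 * Worked example for memory_access_size() (comment only): for a region
 * that allows up to 8-byte accesses and forbids unaligned ones, a request
 * of l = 6 bytes at addr = 0x1002 is first capped by the address alignment
 * (0x1002 & -0x1002 == 2) and then rounded down to a power of two, so a
 * 2-byte access is issued and the caller loops for the remaining bytes.
 */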
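
/*
 * Take the iothread lock ("big QEMU lock") before an MMIO access to a
 * region that is not marked for lock-free access, and flush any coalesced
 * MMIO buffer so the device observes accesses in order. Returns true if
 * the caller must drop the lock again once its access has completed.
 */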
static bool prepare_mmio_access(MemoryRegion *mr)
{
    bool unlocked = !qemu_mutex_iothread_locked();
    bool release_lock = false;

    if (unlocked && mr->global_locking) {
        qemu_mutex_lock_iothread();
        unlocked = false;
        release_lock = true;
    }
    if (mr->flush_coalesced_mmio) {
        if (unlocked) {
            qemu_mutex_lock_iothread();
        }
        qemu_flush_coalesced_mmio_buffer();
        if (unlocked) {
            qemu_mutex_unlock_iothread();
        }
    }

    return release_lock;
}

/* Called within RCU critical section.  */
static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
                                                MemTxAttrs attrs,
                                                const uint8_t *buf,
                                                int len, hwaddr addr1,
                                                hwaddr l, MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, true)) {
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            /* XXX: could force current_cpu to NULL to avoid
               potential bugs */
            switch (l) {
            case 8:
                /* 64 bit write access */
                val = ldq_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 8,
                                                       attrs);
                break;
            case 4:
                /* 32 bit write access */
                val = ldl_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 4,
                                                       attrs);
                break;
            case 2:
                /* 16 bit write access */
                val = lduw_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 2,
                                                       attrs);
                break;
            case 1:
                /* 8 bit write access */
                val = ldub_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 1,
                                                       attrs);
                break;
            default:
                abort();
            }
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* RAM case */
            ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(mr, addr1, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
    }

    return result;
}

MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                                const uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
        result = address_space_write_continue(as, addr, attrs, buf, len,
                                              addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}
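
/*
 * Illustrative sketch (comment only, assumed caller context): device code
 * writing a response buffer into guest memory and propagating a bus error
 * on failure. 'gpa' and 'data' are hypothetical:
 *
 *     MemTxResult res = address_space_write(&address_space_memory, gpa,
 *                                           MEMTXATTRS_UNSPECIFIED,
 *                                           data, sizeof(data));
 *     if (res != MEMTX_OK) {
 *         // e.g. raise the device's error interrupt
 *     }
 */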

/* Called within RCU critical section.  */
MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
                                        MemTxAttrs attrs, uint8_t *buf,
                                        int len, hwaddr addr1, hwaddr l,
                                        MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, false)) {
            /* I/O case */
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            switch (l) {
            case 8:
                /* 64 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 8,
                                                      attrs);
                stq_p(buf, val);
                break;
            case 4:
                /* 32 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 4,
                                                      attrs);
                stl_p(buf, val);
                break;
            case 2:
                /* 16 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 2,
                                                      attrs);
                stw_p(buf, val);
                break;
            case 1:
                /* 8 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 1,
                                                      attrs);
                stb_p(buf, val);
                break;
            default:
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_get_ram_ptr(mr->ram_block,
                                   memory_region_get_ram_addr(mr) + addr1);
            memcpy(buf, ptr, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
    }

    return result;
}

MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs, uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
        result = address_space_read_continue(as, addr, attrs, buf, len,
                                             addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}

MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                             uint8_t *buf, int len, bool is_write)
{
    if (is_write) {
        return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
    } else {
        return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
    }
}

void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}

enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            l = memory_access_size(mr, l, addr1);
        } else {
            addr1 += memory_region_get_ram_addr(mr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();
}

/* Used for ROM loading: can write to both RAM and ROM. */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}
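
/*
 * Illustrative sketch (comment only): how a board or firmware loader might
 * use this, with 'blob'/'blob_len'/'rom_base' assumed to come from the
 * caller:
 *
 *     cpu_physical_memory_write_rom(&address_space_memory, rom_base,
 *                                   blob, blob_len);
 *
 * Unlike address_space_write(), this bypasses the read-only protection of
 * ROM regions, which is exactly what loading firmware images requires.
 */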

void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }

    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}
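
/*
 * A single, statically allocated bounce buffer backs address_space_map()
 * when the target is MMIO and therefore has no host pointer. Only one
 * mapping can use it at a time; a contender that finds it busy registers a
 * callback with cpu_register_map_client() and retries when it is notified.
 */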
typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
    bool in_use;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    QEMUBH *bh;
    QLIST_ENTRY(MapClient) link;
} MapClient;

QemuMutex map_client_list_lock;
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

static void cpu_unregister_map_client_do(MapClient *client)
{
    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients_locked(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        qemu_bh_schedule(client->bh);
        cpu_unregister_map_client_do(client);
    }
}

void cpu_register_map_client(QEMUBH *bh)
{
    MapClient *client = g_malloc(sizeof(*client));

    qemu_mutex_lock(&map_client_list_lock);
    client->bh = bh;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    if (!atomic_read(&bounce.in_use)) {
        cpu_notify_map_clients_locked();
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

void cpu_exec_init_all(void)
{
    qemu_mutex_init(&ram_list.mutex);
    io_mem_init();
    memory_map_init();
    qemu_mutex_init(&map_client_list_lock);
}

void cpu_unregister_map_client(QEMUBH *bh)
{
    MapClient *client;

    qemu_mutex_lock(&map_client_list_lock);
    QLIST_FOREACH(client, &map_client_list, link) {
        if (client->bh == bh) {
            cpu_unregister_map_client_do(client);
            break;
        }
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

static void cpu_notify_map_clients(void)
{
    qemu_mutex_lock(&map_client_list_lock);
    cpu_notify_map_clients_locked();
    qemu_mutex_unlock(&map_client_list_lock);
}
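
/*
 * Illustrative sketch (comment only, assumed caller context): retrying a
 * DMA mapping once the bounce buffer is released. 'retry_bh' is a
 * hypothetical QEMUBH that re-runs the caller's mapping logic:
 *
 *     p = address_space_map(as, addr, &len, is_write);
 *     if (!p) {
 *         cpu_register_map_client(retry_bh);
 *         return;   // retry_bh is scheduled when the buffer frees up
 *     }
 */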

bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                /* Don't leak the RCU critical section on the failure path. */
                rcu_read_unlock();
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    rcu_read_unlock();
    return true;
}

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    ram_addr_t raddr;
    void *ptr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &l, is_write);

    if (!memory_access_is_direct(mr, is_write)) {
        if (atomic_xchg(&bounce.in_use, true)) {
            rcu_read_unlock();
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                               bounce.buffer, l);
        }

        rcu_read_unlock();
        *plen = l;
        return bounce.buffer;
    }

    base = xlat;
    raddr = memory_region_get_ram_addr(mr);

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    *plen = done;
    ptr = qemu_ram_ptr_length(mr->ram_block, raddr + base, plen);
    rcu_read_unlock();

    return ptr;
}

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = qemu_ram_addr_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}
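
/*
 * Illustrative sketch (comment only, assumed caller context): a zero-copy
 * write into guest memory. 'gpa' and 'size' are hypothetical; note that
 * the mapping may cover less than the requested length:
 *
 *     hwaddr len = size;
 *     uint8_t *p = address_space_map(as, gpa, &len, true);
 *     if (p) {
 *         ... produce up to 'len' bytes into p ...
 *         address_space_unmap(as, p, len, true, len);
 *     }
 */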

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    address_space_unmap(&address_space_memory, buffer, len, is_write,
                        access_len);
}

/* warning: addr must be aligned */
static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(mr->ram_block,
                               (memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
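
/*
 * Illustrative sketch (comment only): reading a 32-bit little-endian field
 * from a hypothetical in-memory descriptor at 'desc_gpa', ignoring
 * transaction results (result == NULL):
 *
 *     uint32_t flags = ldl_le_phys(&address_space_memory, desc_gpa + 4);
 */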

/* warning: addr must be aligned */
static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(mr->ram_block,
                               (memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* XXX: optimize */
uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &val, 1, 0);
    if (result) {
        *result = r;
    }
    return val;
}

uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline uint32_t address_space_lduw_internal(AddressSpace *as,
                                                   hwaddr addr,
                                                   MemTxAttrs attrs,
                                                   MemTxResult *result,
                                                   enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(mr->ram_block,
                               (memory_region_get_ram_addr(mr)
                                & TARGET_PAGE_MASK)
                               + addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_BIG_ENDIAN);
}

uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned. The RAM page is not marked as dirty
 * and the code inside is not invalidated. This is useful if the dirty
 * bits are used to track modified PTEs.
 */
void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
                                MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    uint8_t dirty_log_mask;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
        stl_p(ptr, val);

        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
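
/*
 * Illustrative sketch (comment only): a softmmu page-table walker setting
 * an accessed bit in a guest PTE. 'pte_addr', 'pte' and 'PTE_ACCESSED'
 * are hypothetical; the point is that the store must not flip the page's
 * dirty bits, because those are what the walker itself uses to detect PTE
 * modifications:
 *
 *     pte |= PTE_ACCESSED;
 *     stl_phys_notdirty(cs->as, pte_addr, pte);
 */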

/* warning: addr must be aligned */
static inline void address_space_stl_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        /* RAM case */
        addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
        ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 4);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* XXX: optimize */
void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t v = val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &v, 1, 1);
    if (result) {
        *result = r;
    }
}

void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
3523
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003524/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003525static inline void address_space_stw_internal(AddressSpace *as,
3526 hwaddr addr, uint32_t val,
3527 MemTxAttrs attrs,
3528 MemTxResult *result,
3529 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003530{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003531 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003532 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003533 hwaddr l = 2;
3534 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003535 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003536 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003537
Paolo Bonzini41063e12015-03-18 14:21:43 +01003538 rcu_read_lock();
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003539 mr = address_space_translate(as, addr, &addr1, &l, true);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003540 if (l < 2 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003541 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003542
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003543#if defined(TARGET_WORDS_BIGENDIAN)
3544 if (endian == DEVICE_LITTLE_ENDIAN) {
3545 val = bswap16(val);
3546 }
3547#else
3548 if (endian == DEVICE_BIG_ENDIAN) {
3549 val = bswap16(val);
3550 }
3551#endif
Peter Maydell50013112015-04-26 16:49:24 +01003552 r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003553 } else {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003554 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003555 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
Gonglei3655cb92016-02-20 10:35:20 +08003556 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003557 switch (endian) {
3558 case DEVICE_LITTLE_ENDIAN:
3559 stw_le_p(ptr, val);
3560 break;
3561 case DEVICE_BIG_ENDIAN:
3562 stw_be_p(ptr, val);
3563 break;
3564 default:
3565 stw_p(ptr, val);
3566 break;
3567 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003568 invalidate_and_set_dirty(mr, addr1, 2);
Peter Maydell50013112015-04-26 16:49:24 +01003569 r = MEMTX_OK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003570 }
Peter Maydell50013112015-04-26 16:49:24 +01003571 if (result) {
3572 *result = r;
3573 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003574 if (release_lock) {
3575 qemu_mutex_unlock_iothread();
3576 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003577 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003578}
3579
3580void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
3581 MemTxAttrs attrs, MemTxResult *result)
3582{
3583 address_space_stw_internal(as, addr, val, attrs, result,
3584 DEVICE_NATIVE_ENDIAN);
3585}
3586
3587void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
3588 MemTxAttrs attrs, MemTxResult *result)
3589{
3590 address_space_stw_internal(as, addr, val, attrs, result,
3591 DEVICE_LITTLE_ENDIAN);
3592}
3593
3594void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
3595 MemTxAttrs attrs, MemTxResult *result)
3596{
3597 address_space_stw_internal(as, addr, val, attrs, result,
3598 DEVICE_BIG_ENDIAN);
bellardaab33092005-10-30 20:48:42 +00003599}
3600
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003601void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003602{
Peter Maydell50013112015-04-26 16:49:24 +01003603 address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003604}
3605
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003606void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003607{
Peter Maydell50013112015-04-26 16:49:24 +01003608 address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003609}
3610
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003611void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003612{
Peter Maydell50013112015-04-26 16:49:24 +01003613 address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003614}
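
/*
 * Hedged sketch, not in the original file: the 16-bit helpers above
 * require a 2-byte-aligned address (see the "addr must be aligned"
 * warning).  A caller programming a pair of big-endian 16-bit device
 * registers might look like this; the register layout and names are
 * invented for illustration.
 */
static inline void example_write_ctrl_regs(AddressSpace *as,
                                           hwaddr regs_base,
                                           uint16_t ctrl, uint16_t mask)
{
    /* regs_base is assumed naturally aligned, as the helpers require. */
    stw_be_phys(as, regs_base + 0, ctrl);
    stw_be_phys(as, regs_base + 2, mask);
}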

/* XXX: optimize */
void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = tswap64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_le64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_be64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
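
/*
 * Hedged sketch, for illustration only: unlike the 16- and 32-bit
 * helpers, the stq variants above byte-swap the value on the stack and
 * push it through address_space_rw().  A caller that wants to detect a
 * failed store might look like this; the function name and error policy
 * are invented.
 */
static inline bool example_store_le64_counter(AddressSpace *as,
                                              hwaddr counter_addr,
                                              uint64_t ticks)
{
    MemTxResult res;

    address_space_stq_le(as, counter_addr, ticks,
                         MEMTXATTRS_UNSPECIFIED, &res);
    return res == MEMTX_OK;   /* a real device would latch an error here */
}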

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        int asidx;
        MemTxAttrs attrs;

        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
        asidx = cpu_asidx_from_attrs(cpu, attrs);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
                                          phys_addr, buf, l);
        } else {
            address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
                             MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
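
/*
 * Hedged sketch, not part of the original file: how a debug-stub style
 * caller might use cpu_memory_rw_debug(), which walks the guest mapping
 * one TARGET_PAGE at a time.  The function name is invented.
 */
static inline bool example_debug_read_u32(CPUState *cpu, target_ulong vaddr,
                                          uint32_t *out)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(cpu, vaddr, buf, sizeof(buf), 0) < 0) {
        return false;       /* some page in the range was unmapped */
    }
    *out = ldl_p(buf);      /* interpret in target endianness */
    return true;
}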

/*
 * Allows code that needs to deal with migration bitmaps etc. to still be
 * built target-independent.
 */
size_t qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}
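
/*
 * Hedged sketch, for illustration only: target-independent migration
 * code can derive the page size from qemu_target_page_bits() instead of
 * using TARGET_PAGE_SIZE directly.  The function name is invented.
 */
static inline size_t example_pages_in_region(size_t region_bytes)
{
    size_t page_size = (size_t)1 << qemu_target_page_bits();

    /* Round up so a partial trailing page still gets a bitmap slot. */
    return (region_bytes + page_size - 1) / page_size;
}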

#endif

/*
 * A helper function for the _utterly broken_ virtio device model to find
 * out if it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
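
/*
 * Hedged sketch, not in the original file: the kind of call site the
 * comment above is apologizing for -- a legacy device model picking an
 * endianness at runtime.  The wrapper function itself is invented.
 */
static inline enum device_endian example_legacy_device_endian(void)
{
    return target_words_bigendian() ? DEVICE_BIG_ENDIAN
                                    : DEVICE_LITTLE_ENDIAN;
}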

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}
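
/*
 * Hedged sketch, for illustration only: a crash-dump style caller can
 * use cpu_physical_memory_is_io() to skip device regions, where a blind
 * read could have side effects.  The function and its policy are invented.
 */
static inline bool example_page_safe_to_dump(hwaddr paddr)
{
    /* Only plain RAM/ROM is safe to read without touching a device. */
    return !cpu_physical_memory_is_io(paddr);
}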

int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);
        if (ret) {
            break;
        }
    }
    rcu_read_unlock();
    return ret;
}
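
/*
 * Hedged sketch, not part of the original file: a callback for
 * qemu_ram_foreach_block() that sums the used length of every RAM block.
 * The names are invented, and the parameter list is assumed to match the
 * RAMBlockIterFunc typedef; returning non-zero stops the iteration, as
 * the loop above shows.
 */
static int example_sum_block_cb(const char *idstr, void *host_addr,
                                ram_addr_t offset, ram_addr_t length,
                                void *opaque)
{
    *(uint64_t *)opaque += length;
    return 0;   /* keep iterating */
}

static inline uint64_t example_total_ram_bytes(void)
{
    uint64_t total = 0;

    qemu_ram_foreach_block(example_sum_block_cb, &total);
    return total;
}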
#endif