/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#ifndef _WIN32
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "hw/qdev.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "exec/log.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits to skip to the next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables. */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif

#if !defined(CONFIG_USER_ONLY)

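/* Ensure that at least @nodes more nodes can be allocated in @map without
 * reallocation; the backing array at least doubles whenever it grows.
 */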
static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

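/* Allocate one radix tree node and initialize all of its entries: leaf
 * entries point at the unassigned section, interior entries are empty
 * (PHYS_MAP_NODE_NIL).  Returns the index of the new node.
 */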
static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

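/* Descend the radix tree, allocating interior nodes on demand, and point
 * every entry that covers [*index, *index + *nb) pages at section @leaf.
 * A range aligned to a whole subtree is recorded directly at this level;
 * anything else recurses one level down.
 */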
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

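/* Map @nb target pages starting at page number @index to section @leaf
 * in @d's dispatch tree.
 */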
static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non-leaf page entry. Simply detect that the entry has a single
 * child, and update our entry so we can skip it and go directly to the
 * destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return section->size.hi ||
           range_covers_byte(section->offset_within_address_space,
                             section->size.lo, addr);
}

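/* Walk the dispatch tree from @lp and return the MemoryRegionSection
 * covering @addr, or the unassigned section if no leaf covers it.
 */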
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions. When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly. For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine. If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif

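/* Return the CPUState with the given cpu_index, or NULL if none exists. */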
CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
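/* Attach @as to @cpu as its address space number @asidx; when TCG is in
 * use, also register a listener so translated code can track changes to it.
 */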
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
#endif

#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

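/* Allocate the lowest free cpu_index, or report an error if all
 * MAX_CPUMASK_BITS indexes are already in use.
 */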
static int cpu_get_free_index(Error **errp)
{
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
                   MAX_CPUMASK_BITS);
        return -1;
    }

    bitmap_set(cpu_index_map, cpu, 1);
    return cpu;
}

void cpu_exec_exit(CPUState *cpu)
{
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
        return;
    }

    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
    cpu->cpu_index = -1;
}
#else

static int cpu_get_free_index(Error **errp)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

void cpu_exec_exit(CPUState *cpu)
{
}
#endif

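/* Common CPU initialization: allocate a cpu_index, link the CPU into the
 * global list, and register its common and per-class migration state.
 */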
void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int cpu_index;
    Error *local_err = NULL;

    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();

    /* This is a softmmu CPU object, so create a property for it
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
                             (Object **)&cpu->memory,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));
#endif

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

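/* Invalidate any translated code containing @pc so that an inserted or
 * removed breakpoint takes effect on the next execution.
 */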
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    MemTxAttrs attrs;
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint. */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint. */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference. */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints. */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}

#endif

/* Add a breakpoint. */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint. */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference. */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

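/* Print a fatal error message along with the CPU state, flush and close
 * the log if one is open, then abort().
 */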
void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *                  rcu_read_lock()
     *                  read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *                  rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

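/* Tell every CPU to re-arm dirty tracking in its TLB for the RAM range
 * [start, start + length); the range must lie within a single RAMBlock.
 */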
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block. */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    bool dirty = false;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
                                              offset, num);
        page += num;
    }

    rcu_read_unlock();

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM. */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines. */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this. Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries. Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}

Paolo Bonzini052e87b2013-05-27 10:08:27 +02001157static void register_multipage(AddressSpaceDispatch *d,
1158 MemoryRegionSection *section)
bellard33417e72003-08-10 21:47:01 +00001159{
Avi Kivitya8170e52012-10-23 12:30:10 +02001160 hwaddr start_addr = section->offset_within_address_space;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001161 uint16_t section_index = phys_section_add(&d->map, section);
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001162 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1163 TARGET_PAGE_BITS));
Avi Kivitydd811242012-01-02 12:17:03 +02001164
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001165 assert(num_pages);
1166 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
bellard33417e72003-08-10 21:47:01 +00001167}
1168
Avi Kivityac1970f2012-10-03 16:22:53 +02001169static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001170{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001171 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini00752702013-05-29 12:13:54 +02001172 AddressSpaceDispatch *d = as->next_dispatch;
Paolo Bonzini99b9cc02013-05-27 13:18:01 +02001173 MemoryRegionSection now = *section, remain = *section;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001174 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001175
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001176 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1177 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1178 - now.offset_within_address_space;
1179
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001180 now.size = int128_min(int128_make64(left), now.size);
Avi Kivityac1970f2012-10-03 16:22:53 +02001181 register_subpage(d, &now);
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001182 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001183 now.size = int128_zero();
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001184 }
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001185 while (int128_ne(remain.size, now.size)) {
1186 remain.size = int128_sub(remain.size, now.size);
1187 remain.offset_within_address_space += int128_get64(now.size);
1188 remain.offset_within_region += int128_get64(now.size);
Tyler Hall69b67642012-07-25 18:45:04 -04001189 now = remain;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001190 if (int128_lt(remain.size, page_size)) {
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001191 register_subpage(d, &now);
Hu Tao88266242013-08-29 18:21:16 +08001192 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001193 now.size = page_size;
Avi Kivityac1970f2012-10-03 16:22:53 +02001194 register_subpage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001195 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001196 now.size = int128_and(now.size, int128_neg(page_size));
Avi Kivityac1970f2012-10-03 16:22:53 +02001197 register_multipage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001198 }
Avi Kivity0f0cb162012-02-13 17:14:32 +02001199 }
1200}
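/* Worked example (illustrative, assuming 4 KiB target pages): a section
 * covering [0x0800, 0x3800) is split by the loop above into a subpage
 * head [0x0800, 0x1000), a full-page run [0x1000, 0x3000) handled by
 * register_multipage(), and a subpage tail [0x3000, 0x3800).
 */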
1201
Sheng Yang62a27442010-01-26 19:21:16 +08001202void qemu_flush_coalesced_mmio_buffer(void)
1203{
1204 if (kvm_enabled())
1205 kvm_flush_coalesced_mmio_buffer();
1206}
1207
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001208void qemu_mutex_lock_ramlist(void)
1209{
1210 qemu_mutex_lock(&ram_list.mutex);
1211}
1212
1213void qemu_mutex_unlock_ramlist(void)
1214{
1215 qemu_mutex_unlock(&ram_list.mutex);
1216}
1217
Markus Armbrustere1e84ba2013-07-31 15:11:10 +02001218#ifdef __linux__
Marcelo Tosattic9027602010-03-01 20:25:08 -03001219
1220#include <sys/vfs.h>
1221
1222#define HUGETLBFS_MAGIC 0x958458f6
1223
Hu Taofc7a5802014-09-09 13:28:01 +08001224static long gethugepagesize(const char *path, Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001225{
1226 struct statfs fs;
1227 int ret;
1228
1229 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001230 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001231 } while (ret != 0 && errno == EINTR);
1232
1233 if (ret != 0) {
Hu Taofc7a5802014-09-09 13:28:01 +08001234 error_setg_errno(errp, errno, "failed to get page size of file %s",
1235 path);
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001236 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001237 }
1238
Marcelo Tosattic9027602010-03-01 20:25:08 -03001239 return fs.f_bsize;
1240}
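/* Note (illustrative): HUGETLBFS_MAGIC is defined above but unused here;
 * a caller that wanted to insist on an actual hugetlbfs mount could add
 * a check along these lines before trusting f_bsize as the page size:
 *
 *     if (fs.f_type != HUGETLBFS_MAGIC) {
 *         error_setg(errp, "%s is not on a hugetlbfs mount", path);
 *     }
 */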
1241
Alex Williamson04b16652010-07-02 11:13:17 -06001242static void *file_ram_alloc(RAMBlock *block,
1243 ram_addr_t memory,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001244 const char *path,
1245 Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001246{
Pavel Fedin8d31d6b2015-10-28 12:54:07 +03001247 struct stat st;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001248 char *filename;
Peter Feiner8ca761f2013-03-04 13:54:25 -05001249 char *sanitized_name;
1250 char *c;
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +03001251 void *area;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001252 int fd;
Hu Tao557529d2014-09-09 13:28:00 +08001253 uint64_t hpagesize;
Hu Taofc7a5802014-09-09 13:28:01 +08001254 Error *local_err = NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001255
Hu Taofc7a5802014-09-09 13:28:01 +08001256 hpagesize = gethugepagesize(path, &local_err);
1257 if (local_err) {
1258 error_propagate(errp, local_err);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001259 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001260 }
Igor Mammedova2b257d2014-10-31 16:38:37 +00001261 block->mr->align = hpagesize;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001262
1263 if (memory < hpagesize) {
Hu Tao557529d2014-09-09 13:28:00 +08001264 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1265 "or larger than huge page size 0x%" PRIx64,
1266 memory, hpagesize);
1267 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001268 }
1269
1270 if (kvm_enabled() && !kvm_has_sync_mmu()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001271 error_setg(errp,
1272 "host lacks kvm mmu notifiers, -mem-path unsupported");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001273 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001274 }
1275
Pavel Fedin8d31d6b2015-10-28 12:54:07 +03001276 if (!stat(path, &st) && S_ISDIR(st.st_mode)) {
1277 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1278 sanitized_name = g_strdup(memory_region_name(block->mr));
1279 for (c = sanitized_name; *c != '\0'; c++) {
1280 if (*c == '/') {
1281 *c = '_';
1282 }
1283 }
1284
1285 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1286 sanitized_name);
1287 g_free(sanitized_name);
1288
1289 fd = mkstemp(filename);
1290 if (fd >= 0) {
1291 unlink(filename);
1292 }
1293 g_free(filename);
1294 } else {
1295 fd = open(path, O_RDWR | O_CREAT, 0644);
Peter Feiner8ca761f2013-03-04 13:54:25 -05001296 }
1297
Marcelo Tosattic9027602010-03-01 20:25:08 -03001298 if (fd < 0) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001299 error_setg_errno(errp, errno,
1300 "unable to create backing store for hugepages");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001301 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001302 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001303
Chen Hanxiao9284f312015-07-24 11:12:03 +08001304 memory = ROUND_UP(memory, hpagesize);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001305
1306 /*
1307 * ftruncate is not supported by hugetlbfs in older
1308 * hosts, so don't bother bailing out on errors.
1309 * If anything goes wrong with it under other filesystems,
1310 * mmap will fail.
1311 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001312 if (ftruncate(fd, memory)) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001313 perror("ftruncate");
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001314 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001315
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +03001316 area = qemu_ram_mmap(fd, memory, hpagesize, block->flags & RAM_SHARED);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001317 if (area == MAP_FAILED) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001318 error_setg_errno(errp, errno,
1319 "unable to map backing store for hugepages");
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001320 close(fd);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001321 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001322 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001323
1324 if (mem_prealloc) {
Paolo Bonzini38183312014-05-14 17:43:21 +08001325 os_mem_prealloc(fd, area, memory);
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001326 }
1327
Alex Williamson04b16652010-07-02 11:13:17 -06001328 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001329 return area;
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001330
1331error:
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001332 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001333}
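/* Call-flow sketch (descriptive; the mount point is an example): with
 * "-mem-path /dev/hugepages" allocation reaches this function via
 * qemu_ram_alloc_from_file() below; block->fd is filled in here and the
 * returned mapping becomes new_block->host in the caller.
 */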
1334#endif
1335
Mike Day0dc3f442013-09-05 14:41:35 -04001336/* Called with the ramlist lock held. */
Alex Williamsond17b5282010-06-25 11:08:38 -06001337static ram_addr_t find_ram_offset(ram_addr_t size)
1338{
Alex Williamson04b16652010-07-02 11:13:17 -06001339 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001340 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001341
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001342 assert(size != 0); /* it would hand out the same offset multiple times */
1343
Mike Day0dc3f442013-09-05 14:41:35 -04001344 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
Alex Williamson04b16652010-07-02 11:13:17 -06001345 return 0;
Mike Day0d53d9f2015-01-21 13:45:24 +01001346 }
Alex Williamson04b16652010-07-02 11:13:17 -06001347
Mike Day0dc3f442013-09-05 14:41:35 -04001348 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001349 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001350
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001351 end = block->offset + block->max_length;
Alex Williamson04b16652010-07-02 11:13:17 -06001352
Mike Day0dc3f442013-09-05 14:41:35 -04001353 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001354 if (next_block->offset >= end) {
1355 next = MIN(next, next_block->offset);
1356 }
1357 }
1358 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001359 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001360 mingap = next - end;
1361 }
1362 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001363
1364 if (offset == RAM_ADDR_MAX) {
1365 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1366 (uint64_t)size);
1367 abort();
1368 }
1369
Alex Williamson04b16652010-07-02 11:13:17 -06001370 return offset;
1371}
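/* Example (illustrative): with existing blocks at [0, 2M) and [6M, 8M),
 * a 3M request returns offset 2M: the 4M gap after the first block is
 * the smallest candidate that still fits, so it wins over the unbounded
 * space past 8M.
 */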
1372
Juan Quintela652d7ec2012-07-20 10:37:54 +02001373ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001374{
Alex Williamsond17b5282010-06-25 11:08:38 -06001375 RAMBlock *block;
1376 ram_addr_t last = 0;
1377
Mike Day0dc3f442013-09-05 14:41:35 -04001378 rcu_read_lock();
1379 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001380 last = MAX(last, block->offset + block->max_length);
Mike Day0d53d9f2015-01-21 13:45:24 +01001381 }
Mike Day0dc3f442013-09-05 14:41:35 -04001382 rcu_read_unlock();
Alex Williamsond17b5282010-06-25 11:08:38 -06001383 return last;
1384}
1385
Jason Baronddb97f12012-08-02 15:44:16 -04001386static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1387{
1388 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001389
1390 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
Marcel Apfelbaum47c8ca52015-02-04 17:43:54 +02001391 if (!machine_dump_guest_core(current_machine)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001392 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1393 if (ret) {
1394 perror("qemu_madvise");
1395 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1396 "but dump_guest_core=off specified\n");
1397 }
1398 }
1399}
1400
Mike Day0dc3f442013-09-05 14:41:35 -04001401/* Called within an RCU critical section, or while the ramlist lock
1402 * is held.
1403 */
Hu Tao20cfe882014-04-02 15:13:26 +08001404static RAMBlock *find_ram_block(ram_addr_t addr)
Cam Macdonell84b89d72010-07-26 18:10:57 -06001405{
Hu Tao20cfe882014-04-02 15:13:26 +08001406 RAMBlock *block;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001407
Mike Day0dc3f442013-09-05 14:41:35 -04001408 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001409 if (block->offset == addr) {
Hu Tao20cfe882014-04-02 15:13:26 +08001410 return block;
Avi Kivityc5705a72011-12-20 15:59:12 +02001411 }
1412 }
Hu Tao20cfe882014-04-02 15:13:26 +08001413
1414 return NULL;
1415}
1416
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001417const char *qemu_ram_get_idstr(RAMBlock *rb)
1418{
1419 return rb->idstr;
1420}
1421
Mike Dayae3a7042013-09-05 14:41:35 -04001422/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001423void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1424{
Mike Dayae3a7042013-09-05 14:41:35 -04001425 RAMBlock *new_block, *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001426
Mike Day0dc3f442013-09-05 14:41:35 -04001427 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001428 new_block = find_ram_block(addr);
Avi Kivityc5705a72011-12-20 15:59:12 +02001429 assert(new_block);
1430 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001431
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001432 if (dev) {
1433 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001434 if (id) {
1435 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001436 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001437 }
1438 }
1439 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1440
Mike Day0dc3f442013-09-05 14:41:35 -04001441 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001442 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001443 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1444 new_block->idstr);
1445 abort();
1446 }
1447 }
Mike Day0dc3f442013-09-05 14:41:35 -04001448 rcu_read_unlock();
Avi Kivityc5705a72011-12-20 15:59:12 +02001449}
1450
Mike Dayae3a7042013-09-05 14:41:35 -04001451/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001452void qemu_ram_unset_idstr(ram_addr_t addr)
1453{
Mike Dayae3a7042013-09-05 14:41:35 -04001454 RAMBlock *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001455
Mike Dayae3a7042013-09-05 14:41:35 -04001456 /* FIXME: arch_init.c assumes that this is not called during
1457 * migration. Ignore the problem since hot-unplug during migration
1458 * does not work anyway.
1459 */
1460
Mike Day0dc3f442013-09-05 14:41:35 -04001461 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001462 block = find_ram_block(addr);
Hu Tao20cfe882014-04-02 15:13:26 +08001463 if (block) {
1464 memset(block->idstr, 0, sizeof(block->idstr));
1465 }
Mike Day0dc3f442013-09-05 14:41:35 -04001466 rcu_read_unlock();
Hu Tao20cfe882014-04-02 15:13:26 +08001467}
1468
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001469static int memory_try_enable_merging(void *addr, size_t len)
1470{
Marcel Apfelbaum75cc7f02015-02-04 17:43:55 +02001471 if (!machine_mem_merge(current_machine)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001472 /* disabled by the user */
1473 return 0;
1474 }
1475
1476 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1477}
1478
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001479/* Only legal before guest might have detected the memory size: e.g. on
1480 * incoming migration, or right after reset.
1481 *
1482 * As the memory core doesn't know how memory is accessed, it is up to the
1483 * resize callback to update device state and/or add assertions to detect
1484 * misuse, if necessary.
1485 */
1486int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
1487{
1488 RAMBlock *block = find_ram_block(base);
1489
1490 assert(block);
1491
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001492 newsize = HOST_PAGE_ALIGN(newsize);
Michael S. Tsirkin129ddaf2015-02-17 10:15:30 +01001493
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001494 if (block->used_length == newsize) {
1495 return 0;
1496 }
1497
1498 if (!(block->flags & RAM_RESIZEABLE)) {
1499 error_setg_errno(errp, EINVAL,
1500 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1501 " in != 0x" RAM_ADDR_FMT, block->idstr,
1502 newsize, block->used_length);
1503 return -EINVAL;
1504 }
1505
1506 if (block->max_length < newsize) {
1507 error_setg_errno(errp, EINVAL,
1508 "Length too large: %s: 0x" RAM_ADDR_FMT
1509 " > 0x" RAM_ADDR_FMT, block->idstr,
1510 newsize, block->max_length);
1511 return -EINVAL;
1512 }
1513
1514 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1515 block->used_length = newsize;
Paolo Bonzini58d27072015-03-23 11:56:01 +01001516 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1517 DIRTY_CLIENTS_ALL);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001518 memory_region_set_size(block->mr, newsize);
1519 if (block->resized) {
1520 block->resized(block->idstr, newsize, block->host);
1521 }
1522 return 0;
1523}
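/* Illustrative use (hypothetical caller and size): growing a
 * RAM_RESIZEABLE block, e.g. on incoming migration when the source
 * reports a larger used length.
 */
#if 0
    Error *err = NULL;
    if (qemu_ram_resize(block->offset, 64 * 1024 * 1024, &err) < 0) {
        error_report_err(err);
    }
#endif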
1524
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001525/* Called with ram_list.mutex held */
1526static void dirty_memory_extend(ram_addr_t old_ram_size,
1527 ram_addr_t new_ram_size)
1528{
1529 ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
1530 DIRTY_MEMORY_BLOCK_SIZE);
1531 ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
1532 DIRTY_MEMORY_BLOCK_SIZE);
1533 int i;
1534
1535 /* Only need to extend if block count increased */
1536 if (new_num_blocks <= old_num_blocks) {
1537 return;
1538 }
1539
1540 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1541 DirtyMemoryBlocks *old_blocks;
1542 DirtyMemoryBlocks *new_blocks;
1543 int j;
1544
1545 old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
1546 new_blocks = g_malloc(sizeof(*new_blocks) +
1547 sizeof(new_blocks->blocks[0]) * new_num_blocks);
1548
1549 if (old_num_blocks) {
1550 memcpy(new_blocks->blocks, old_blocks->blocks,
1551 old_num_blocks * sizeof(old_blocks->blocks[0]));
1552 }
1553
1554 for (j = old_num_blocks; j < new_num_blocks; j++) {
1555 new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
1556 }
1557
1558 atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
1559
1560 if (old_blocks) {
1561 g_free_rcu(old_blocks, rcu);
1562 }
1563 }
1564}
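/* Example (descriptive): growing from one dirty-memory block to two
 * copies the existing bitmap pointer into a fresh array, allocates one
 * new bitmap, publishes the array with atomic_rcu_set(), and frees the
 * old array only after a grace period, so concurrent readers see either
 * the old or the new array, never a half-updated one.
 */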
1565
Fam Zheng528f46a2016-03-01 14:18:18 +08001566static void ram_block_add(RAMBlock *new_block, Error **errp)
Avi Kivityc5705a72011-12-20 15:59:12 +02001567{
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001568 RAMBlock *block;
Mike Day0d53d9f2015-01-21 13:45:24 +01001569 RAMBlock *last_block = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001570 ram_addr_t old_ram_size, new_ram_size;
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001571 Error *err = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001572
1573 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001574
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001575 qemu_mutex_lock_ramlist();
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001576 new_block->offset = find_ram_offset(new_block->max_length);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001577
1578 if (!new_block->host) {
1579 if (xen_enabled()) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001580 xen_ram_alloc(new_block->offset, new_block->max_length,
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001581 new_block->mr, &err);
1582 if (err) {
1583 error_propagate(errp, err);
1584 qemu_mutex_unlock_ramlist();
                return;
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001585 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001586 } else {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001587 new_block->host = phys_mem_alloc(new_block->max_length,
Igor Mammedova2b257d2014-10-31 16:38:37 +00001588 &new_block->mr->align);
Markus Armbruster39228252013-07-31 15:11:11 +02001589 if (!new_block->host) {
Hu Taoef701d72014-09-09 13:27:54 +08001590 error_setg_errno(errp, errno,
1591 "cannot set up guest memory '%s'",
1592 memory_region_name(new_block->mr));
1593 qemu_mutex_unlock_ramlist();
                return;
Markus Armbruster39228252013-07-31 15:11:11 +02001594 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001595 memory_try_enable_merging(new_block->host, new_block->max_length);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001596 }
1597 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001598
Li Zhijiandd631692015-07-02 20:18:06 +08001599 new_ram_size = MAX(old_ram_size,
1600 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1601 if (new_ram_size > old_ram_size) {
1602 migration_bitmap_extend(old_ram_size, new_ram_size);
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001603 dirty_memory_extend(old_ram_size, new_ram_size);
Li Zhijiandd631692015-07-02 20:18:06 +08001604 }
Mike Day0d53d9f2015-01-21 13:45:24 +01001605 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1606 * QLIST (which has an RCU-friendly variant) does not have insertion at
1607 * tail, so save the last element in last_block.
1608 */
Mike Day0dc3f442013-09-05 14:41:35 -04001609 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Mike Day0d53d9f2015-01-21 13:45:24 +01001610 last_block = block;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001611 if (block->max_length < new_block->max_length) {
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001612 break;
1613 }
1614 }
1615 if (block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001616 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001617 } else if (last_block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001618 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001619 } else { /* list is empty */
Mike Day0dc3f442013-09-05 14:41:35 -04001620 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001621 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001622 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001623
Mike Day0dc3f442013-09-05 14:41:35 -04001624 /* Write list before version */
1625 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001626 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001627 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001628
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001629 cpu_physical_memory_set_dirty_range(new_block->offset,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001630 new_block->used_length,
1631 DIRTY_CLIENTS_ALL);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001632
Paolo Bonzinia904c912015-01-21 16:18:35 +01001633 if (new_block->host) {
1634 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1635 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1636 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1637 if (kvm_enabled()) {
1638 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1639 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001640 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001641}
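/* Example (illustrative; the speed rationale is an assumption): after
 * adding blocks of 4G, 1G and 16M the list reads 4G -> 1G -> 16M, which
 * lets list walks hit the largest, most frequently accessed blocks
 * first.
 */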
1642
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001643#ifdef __linux__
Fam Zheng528f46a2016-03-01 14:18:18 +08001644RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
1645 bool share, const char *mem_path,
1646 Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001647{
1648 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001649 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001650
1651 if (xen_enabled()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001652 error_setg(errp, "-mem-path not supported with Xen");
Fam Zheng528f46a2016-03-01 14:18:18 +08001653 return NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001654 }
1655
1656 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1657 /*
1658 * file_ram_alloc() needs to allocate just like
1659 * phys_mem_alloc, but we haven't bothered to provide
1660 * a hook there.
1661 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001662 error_setg(errp,
1663 "-mem-path not supported with this accelerator");
Fam Zheng528f46a2016-03-01 14:18:18 +08001664 return NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001665 }
1666
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001667 size = HOST_PAGE_ALIGN(size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001668 new_block = g_malloc0(sizeof(*new_block));
1669 new_block->mr = mr;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001670 new_block->used_length = size;
1671 new_block->max_length = size;
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001672 new_block->flags = share ? RAM_SHARED : 0;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001673 new_block->host = file_ram_alloc(new_block, size,
1674 mem_path, errp);
1675 if (!new_block->host) {
1676 g_free(new_block);
Fam Zheng528f46a2016-03-01 14:18:18 +08001677 return NULL;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001678 }
1679
Fam Zheng528f46a2016-03-01 14:18:18 +08001680 ram_block_add(new_block, &local_err);
Hu Taoef701d72014-09-09 13:27:54 +08001681 if (local_err) {
1682 g_free(new_block);
1683 error_propagate(errp, local_err);
Fam Zheng528f46a2016-03-01 14:18:18 +08001684 return NULL;
Hu Taoef701d72014-09-09 13:27:54 +08001685 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001686 return new_block;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001687}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001688#endif
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001689
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001690static
Fam Zheng528f46a2016-03-01 14:18:18 +08001691RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1692 void (*resized)(const char*,
1693 uint64_t length,
1694 void *host),
1695 void *host, bool resizeable,
1696 MemoryRegion *mr, Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001697{
1698 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001699 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001700
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001701 size = HOST_PAGE_ALIGN(size);
1702 max_size = HOST_PAGE_ALIGN(max_size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001703 new_block = g_malloc0(sizeof(*new_block));
1704 new_block->mr = mr;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001705 new_block->resized = resized;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001706 new_block->used_length = size;
1707 new_block->max_length = max_size;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001708 assert(max_size >= size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001709 new_block->fd = -1;
1710 new_block->host = host;
1711 if (host) {
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001712 new_block->flags |= RAM_PREALLOC;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001713 }
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001714 if (resizeable) {
1715 new_block->flags |= RAM_RESIZEABLE;
1716 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001717 ram_block_add(new_block, &local_err);
Hu Taoef701d72014-09-09 13:27:54 +08001718 if (local_err) {
1719 g_free(new_block);
1720 error_propagate(errp, local_err);
Fam Zheng528f46a2016-03-01 14:18:18 +08001721 return NULL;
Hu Taoef701d72014-09-09 13:27:54 +08001722 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001723 return new_block;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001724}
1725
Fam Zheng528f46a2016-03-01 14:18:18 +08001726RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001727 MemoryRegion *mr, Error **errp)
1728{
1729 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1730}
1731
Fam Zheng528f46a2016-03-01 14:18:18 +08001732RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
pbrook94a6b542009-04-11 17:15:54 +00001733{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001734 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1735}
1736
Fam Zheng528f46a2016-03-01 14:18:18 +08001737RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001738 void (*resized)(const char*,
1739 uint64_t length,
1740 void *host),
1741 MemoryRegion *mr, Error **errp)
1742{
1743 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
pbrook94a6b542009-04-11 17:15:54 +00001744}
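/* Illustrative use (hypothetical callback name and sizes): allocate a
 * block that may later grow to four times its initial length.
 */
#if 0
static void my_resized(const char *id, uint64_t new_len, void *host)
{
    /* bring device state in line with the new length */
}

RAMBlock *rb = qemu_ram_alloc_resizeable(16 * 1024 * 1024,
                                         64 * 1024 * 1024,
                                         my_resized, mr, &errp);
#endif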
bellarde9a1ab12007-02-08 23:08:38 +00001745
Paolo Bonzini43771532013-09-09 17:58:40 +02001746static void reclaim_ramblock(RAMBlock *block)
1747{
1748 if (block->flags & RAM_PREALLOC) {
1749 ;
1750 } else if (xen_enabled()) {
1751 xen_invalidate_map_cache_entry(block->host);
1752#ifndef _WIN32
1753 } else if (block->fd >= 0) {
Eduardo Habkost2f3a2bb2015-11-06 20:11:21 -02001754 qemu_ram_munmap(block->host, block->max_length);
Paolo Bonzini43771532013-09-09 17:58:40 +02001755 close(block->fd);
1756#endif
1757 } else {
1758 qemu_anon_ram_free(block->host, block->max_length);
1759 }
1760 g_free(block);
1761}
1762
Fam Zhengf1060c52016-03-01 14:18:22 +08001763void qemu_ram_free(RAMBlock *block)
bellarde9a1ab12007-02-08 23:08:38 +00001764{
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001765 qemu_mutex_lock_ramlist();
Fam Zhengf1060c52016-03-01 14:18:22 +08001766 QLIST_REMOVE_RCU(block, next);
1767 ram_list.mru_block = NULL;
1768 /* Write list before version */
1769 smp_wmb();
1770 ram_list.version++;
1771 call_rcu(block, reclaim_ramblock, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001772 qemu_mutex_unlock_ramlist();
bellarde9a1ab12007-02-08 23:08:38 +00001773}
1774
Huang Yingcd19cfa2011-03-02 08:56:19 +01001775#ifndef _WIN32
1776void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1777{
1778 RAMBlock *block;
1779 ram_addr_t offset;
1780 int flags;
1781 void *area, *vaddr;
1782
Mike Day0dc3f442013-09-05 14:41:35 -04001783 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001784 offset = addr - block->offset;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001785 if (offset < block->max_length) {
Michael S. Tsirkin1240be22014-11-12 11:44:41 +02001786 vaddr = ramblock_ptr(block, offset);
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001787 if (block->flags & RAM_PREALLOC) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001788 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001789 } else if (xen_enabled()) {
1790 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001791 } else {
1792 flags = MAP_FIXED;
Markus Armbruster3435f392013-07-31 15:11:07 +02001793 if (block->fd >= 0) {
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001794 flags |= (block->flags & RAM_SHARED ?
1795 MAP_SHARED : MAP_PRIVATE);
Markus Armbruster3435f392013-07-31 15:11:07 +02001796 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1797 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001798 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001799 /*
1800 * Remap needs to match alloc. Accelerators that
1801 * set phys_mem_alloc never remap. If they did,
1802 * we'd need a remap hook here.
1803 */
1804 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1805
Huang Yingcd19cfa2011-03-02 08:56:19 +01001806 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1807 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1808 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001809 }
1810 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001811 fprintf(stderr, "Could not remap addr: "
1812 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001813 length, addr);
1814 exit(1);
1815 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001816 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001817 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001818 }
Huang Yingcd19cfa2011-03-02 08:56:19 +01001819 }
1820 }
1821}
1822#endif /* !_WIN32 */
1823
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001824int qemu_get_ram_fd(ram_addr_t addr)
1825{
Mike Dayae3a7042013-09-05 14:41:35 -04001826 RAMBlock *block;
1827 int fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001828
Mike Day0dc3f442013-09-05 14:41:35 -04001829 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001830 block = qemu_get_ram_block(addr);
1831 fd = block->fd;
Mike Day0dc3f442013-09-05 14:41:35 -04001832 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001833 return fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001834}
1835
Tetsuya Mukawa56a571d2015-12-21 12:47:34 +09001836void qemu_set_ram_fd(ram_addr_t addr, int fd)
1837{
1838 RAMBlock *block;
1839
1840 rcu_read_lock();
1841 block = qemu_get_ram_block(addr);
1842 block->fd = fd;
1843 rcu_read_unlock();
1844}
1845
Damjan Marion3fd74b82014-06-26 23:01:32 +02001846void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1847{
Mike Dayae3a7042013-09-05 14:41:35 -04001848 RAMBlock *block;
1849 void *ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001850
Mike Day0dc3f442013-09-05 14:41:35 -04001851 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001852 block = qemu_get_ram_block(addr);
1853 ptr = ramblock_ptr(block, 0);
Mike Day0dc3f442013-09-05 14:41:35 -04001854 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001855 return ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001856}
1857
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001858/* Return a host pointer to ram allocated with qemu_ram_alloc.
Mike Dayae3a7042013-09-05 14:41:35 -04001859 * This should not be used for general purpose DMA. Use address_space_map
1860 * or address_space_rw instead. For local memory (e.g. video ram) that the
1861 * device owns, use memory_region_get_ram_ptr.
Mike Day0dc3f442013-09-05 14:41:35 -04001862 *
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001863 * Called within RCU critical section.
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001864 */
Gonglei3655cb92016-02-20 10:35:20 +08001865void *qemu_get_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001866{
Gonglei3655cb92016-02-20 10:35:20 +08001867 RAMBlock *block = ram_block;
1868
1869 if (block == NULL) {
1870 block = qemu_get_ram_block(addr);
1871 }
Mike Dayae3a7042013-09-05 14:41:35 -04001872
1873 if (xen_enabled() && block->host == NULL) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001874 /* We need to check if the requested address is in the RAM
1875 * because we don't want to map the entire memory in QEMU.
1876 * In that case just map until the end of the page.
1877 */
1878 if (block->offset == 0) {
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001879 return xen_map_cache(addr, 0, 0);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001880 }
Mike Dayae3a7042013-09-05 14:41:35 -04001881
1882 block->host = xen_map_cache(block->offset, block->max_length, 1);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001883 }
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001884 return ramblock_ptr(block, addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00001885}
1886
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001887/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
Mike Dayae3a7042013-09-05 14:41:35 -04001888 * but takes a size argument.
Mike Day0dc3f442013-09-05 14:41:35 -04001889 *
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001890 * Called within RCU critical section.
Mike Dayae3a7042013-09-05 14:41:35 -04001891 */
Gonglei3655cb92016-02-20 10:35:20 +08001892static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
1893 hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001894{
Gonglei3655cb92016-02-20 10:35:20 +08001895 RAMBlock *block = ram_block;
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001896 ram_addr_t offset_inside_block;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001897 if (*size == 0) {
1898 return NULL;
1899 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001900
Gonglei3655cb92016-02-20 10:35:20 +08001901 if (block == NULL) {
1902 block = qemu_get_ram_block(addr);
1903 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001904 offset_inside_block = addr - block->offset;
1905 *size = MIN(*size, block->max_length - offset_inside_block);
1906
1907 if (xen_enabled() && block->host == NULL) {
1908 /* We need to check if the requested address is in the RAM
1909 * because we don't want to map the entire memory in QEMU.
1910 * In that case just map the requested area.
1911 */
1912 if (block->offset == 0) {
1913 return xen_map_cache(addr, *size, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001914 }
1915
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001916 block->host = xen_map_cache(block->offset, block->max_length, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001917 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001918
1919 return ramblock_ptr(block, offset_inside_block);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001920}
1921
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001922/*
1923 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1924 * in that RAMBlock.
1925 *
1926 * ptr: Host pointer to look up
1927 * round_offset: If true round the result offset down to a page boundary
1928 * *ram_addr: set to result ram_addr
1929 * *offset: set to result offset within the RAMBlock
1930 *
1931 * Returns: RAMBlock (or NULL if not found)
Mike Dayae3a7042013-09-05 14:41:35 -04001932 *
1933 * By the time this function returns, the returned pointer is not protected
1934 * by RCU anymore. If the caller is not within an RCU critical section and
1935 * does not hold the iothread lock, it must have other means of protecting the
1936 * pointer, such as a reference to the region that includes the incoming
1937 * ram_addr_t.
1938 */
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001939RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
1940 ram_addr_t *ram_addr,
1941 ram_addr_t *offset)
pbrook5579c7f2009-04-11 14:47:08 +00001942{
pbrook94a6b542009-04-11 17:15:54 +00001943 RAMBlock *block;
1944 uint8_t *host = ptr;
1945
Jan Kiszka868bb332011-06-21 22:59:09 +02001946 if (xen_enabled()) {
Mike Day0dc3f442013-09-05 14:41:35 -04001947 rcu_read_lock();
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001948 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001949 block = qemu_get_ram_block(*ram_addr);
1950 if (block) {
1951 *offset = (host - block->host);
1952 }
Mike Day0dc3f442013-09-05 14:41:35 -04001953 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001954 return block;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001955 }
1956
Mike Day0dc3f442013-09-05 14:41:35 -04001957 rcu_read_lock();
1958 block = atomic_rcu_read(&ram_list.mru_block);
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001959 if (block && block->host && host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001960 goto found;
1961 }
1962
Mike Day0dc3f442013-09-05 14:41:35 -04001963 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001964 /* This case occurs when the block is not mapped. */
1965 if (block->host == NULL) {
1966 continue;
1967 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001968 if (host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001969 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001970 }
pbrook94a6b542009-04-11 17:15:54 +00001971 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001972
Mike Day0dc3f442013-09-05 14:41:35 -04001973 rcu_read_unlock();
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001974 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001975
1976found:
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001977 *offset = (host - block->host);
1978 if (round_offset) {
1979 *offset &= TARGET_PAGE_MASK;
1980 }
1981 *ram_addr = block->offset + *offset;
Mike Day0dc3f442013-09-05 14:41:35 -04001982 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001983 return block;
1984}
1985
Dr. David Alan Gilberte3dd7492015-11-05 18:10:33 +00001986/*
1987 * Finds the named RAMBlock
1988 *
1989 * name: The name of RAMBlock to find
1990 *
1991 * Returns: RAMBlock (or NULL if not found)
1992 */
1993RAMBlock *qemu_ram_block_by_name(const char *name)
1994{
1995 RAMBlock *block;
1996
1997 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1998 if (!strcmp(name, block->idstr)) {
1999 return block;
2000 }
2001 }
2002
2003 return NULL;
2004}
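/* Illustrative lookup (the block name is an example): */
#if 0
    RAMBlock *rb = qemu_ram_block_by_name("pc.ram");
    if (!rb) {
        error_report("RAM block pc.ram not found");
    }
#endif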
2005
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00002006/* Some of the softmmu routines need to translate from a host pointer
2007 (typically a TLB entry) back to a ram offset. */
2008MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
2009{
2010 RAMBlock *block;
2011 ram_addr_t offset; /* Not used */
2012
2013 block = qemu_ram_block_from_host(ptr, false, ram_addr, &offset);
2014
2015 if (!block) {
2016 return NULL;
2017 }
2018
2019 return block->mr;
Marcelo Tosattie8902612010-10-11 15:31:19 -03002020}
Alex Williamsonf471a172010-06-11 11:11:42 -06002021
Paolo Bonzini49b24af2015-12-16 10:30:47 +01002022/* Called within RCU critical section. */
Avi Kivitya8170e52012-10-23 12:30:10 +02002023static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002024 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00002025{
Juan Quintela52159192013-10-08 12:44:04 +02002026 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002027 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00002028 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002029 switch (size) {
2030 case 1:
Gonglei3655cb92016-02-20 10:35:20 +08002031 stb_p(qemu_get_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002032 break;
2033 case 2:
Gonglei3655cb92016-02-20 10:35:20 +08002034 stw_p(qemu_get_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002035 break;
2036 case 4:
Gonglei3655cb92016-02-20 10:35:20 +08002037 stl_p(qemu_get_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002038 break;
2039 default:
2040 abort();
2041 }
Paolo Bonzini58d27072015-03-23 11:56:01 +01002042 /* Set both VGA and migration bits for simplicity and to remove
2043 * the notdirty callback faster.
2044 */
2045 cpu_physical_memory_set_dirty_range(ram_addr, size,
2046 DIRTY_CLIENTS_NOCODE);
bellardf23db162005-08-21 19:12:28 +00002047 /* we remove the notdirty callback only if the code has been
2048 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02002049 if (!cpu_physical_memory_is_clean(ram_addr)) {
Peter Crosthwaitebcae01e2015-09-10 22:39:42 -07002050 tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
Andreas Färber4917cf42013-05-27 05:17:50 +02002051 }
bellard1ccde1c2004-02-06 19:46:14 +00002052}
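/* Flow summary (descriptive): a store to a page that may contain
 * translated code is routed here by the TLB; TBs on the page are
 * invalidated first, the store is performed, the VGA and migration
 * dirty bits are set, and once the page is fully dirty tlb_set_dirty()
 * rearms the fast path so later stores bypass this callback.
 */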
2053
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002054static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
2055 unsigned size, bool is_write)
2056{
2057 return is_write;
2058}
2059
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002060static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002061 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002062 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002063 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00002064};
2065
pbrook0f459d12008-06-09 00:20:13 +00002066/* Generate a debug exception if a watchpoint has been hit. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002067static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
pbrook0f459d12008-06-09 00:20:13 +00002068{
Andreas Färber93afead2013-08-26 03:41:01 +02002069 CPUState *cpu = current_cpu;
Sergey Fedorov568496c2016-02-11 11:17:32 +00002070 CPUClass *cc = CPU_GET_CLASS(cpu);
Andreas Färber93afead2013-08-26 03:41:01 +02002071 CPUArchState *env = cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00002072 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00002073 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00002074 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00002075 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00002076
Andreas Färberff4700b2013-08-26 18:23:18 +02002077 if (cpu->watchpoint_hit) {
aliguori06d55cc2008-11-18 20:24:06 +00002078 /* We re-entered the check after replacing the TB. Now raise
2079 * the debug interrupt so that it will trigger after the
2080 * current instruction. */
Andreas Färber93afead2013-08-26 03:41:01 +02002081 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00002082 return;
2083 }
Andreas Färber93afead2013-08-26 03:41:01 +02002084 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Andreas Färberff4700b2013-08-26 18:23:18 +02002085 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Peter Maydell05068c02014-09-12 14:06:48 +01002086 if (cpu_watchpoint_address_matches(wp, vaddr, len)
2087 && (wp->flags & flags)) {
Peter Maydell08225672014-09-12 14:06:48 +01002088 if (flags == BP_MEM_READ) {
2089 wp->flags |= BP_WATCHPOINT_HIT_READ;
2090 } else {
2091 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
2092 }
2093 wp->hitaddr = vaddr;
Peter Maydell66b9b432015-04-26 16:49:24 +01002094 wp->hitattrs = attrs;
Andreas Färberff4700b2013-08-26 18:23:18 +02002095 if (!cpu->watchpoint_hit) {
Sergey Fedorov568496c2016-02-11 11:17:32 +00002096 if (wp->flags & BP_CPU &&
2097 !cc->debug_check_watchpoint(cpu, wp)) {
2098 wp->flags &= ~BP_WATCHPOINT_HIT;
2099 continue;
2100 }
Andreas Färberff4700b2013-08-26 18:23:18 +02002101 cpu->watchpoint_hit = wp;
Andreas Färber239c51a2013-09-01 17:12:23 +02002102 tb_check_watchpoint(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002103 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
Andreas Färber27103422013-08-26 08:31:06 +02002104 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +02002105 cpu_loop_exit(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002106 } else {
2107 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
Andreas Färber648f0342013-09-01 17:43:17 +02002108 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
Andreas Färber0ea8cb82013-09-03 02:12:23 +02002109 cpu_resume_from_signal(cpu, NULL);
aliguori6e140f22008-11-18 20:37:55 +00002110 }
aliguori06d55cc2008-11-18 20:24:06 +00002111 }
aliguori6e140f22008-11-18 20:37:55 +00002112 } else {
2113 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00002114 }
2115 }
2116}
2117
pbrook6658ffb2007-03-16 23:58:11 +00002118/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2119 so these check for a hit then pass through to the normal out-of-line
2120 phys routines. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002121static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
2122 unsigned size, MemTxAttrs attrs)
pbrook6658ffb2007-03-16 23:58:11 +00002123{
Peter Maydell66b9b432015-04-26 16:49:24 +01002124 MemTxResult res;
2125 uint64_t data;
Peter Maydell79ed0412016-01-21 14:15:06 +00002126 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2127 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
pbrook6658ffb2007-03-16 23:58:11 +00002128
Peter Maydell66b9b432015-04-26 16:49:24 +01002129 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
Avi Kivity1ec9b902012-01-02 12:47:48 +02002130 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04002131 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002132 data = address_space_ldub(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002133 break;
2134 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002135 data = address_space_lduw(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002136 break;
2137 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002138 data = address_space_ldl(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002139 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02002140 default: abort();
2141 }
Peter Maydell66b9b432015-04-26 16:49:24 +01002142 *pdata = data;
2143 return res;
2144}
2145
2146static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
2147 uint64_t val, unsigned size,
2148 MemTxAttrs attrs)
2149{
2150 MemTxResult res;
Peter Maydell79ed0412016-01-21 14:15:06 +00002151 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2152 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
Peter Maydell66b9b432015-04-26 16:49:24 +01002153
2154 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
2155 switch (size) {
2156 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002157 address_space_stb(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002158 break;
2159 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002160 address_space_stw(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002161 break;
2162 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002163 address_space_stl(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002164 break;
2165 default: abort();
2166 }
2167 return res;
pbrook6658ffb2007-03-16 23:58:11 +00002168}
2169
Avi Kivity1ec9b902012-01-02 12:47:48 +02002170static const MemoryRegionOps watch_mem_ops = {
Peter Maydell66b9b432015-04-26 16:49:24 +01002171 .read_with_attrs = watch_mem_read,
2172 .write_with_attrs = watch_mem_write,
Avi Kivity1ec9b902012-01-02 12:47:48 +02002173 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00002174};
pbrook6658ffb2007-03-16 23:58:11 +00002175
Peter Maydellf25a49e2015-04-26 16:49:24 +01002176static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2177 unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002178{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002179 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002180 uint8_t buf[8];
Peter Maydell5c9eb022015-04-26 16:49:24 +01002181 MemTxResult res;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002182
blueswir1db7b5422007-05-26 17:36:03 +00002183#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002184 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002185 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00002186#endif
Peter Maydell5c9eb022015-04-26 16:49:24 +01002187 res = address_space_read(subpage->as, addr + subpage->base,
2188 attrs, buf, len);
2189 if (res) {
2190 return res;
Peter Maydellf25a49e2015-04-26 16:49:24 +01002191 }
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002192 switch (len) {
2193 case 1:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002194 *data = ldub_p(buf);
2195 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002196 case 2:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002197 *data = lduw_p(buf);
2198 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002199 case 4:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002200 *data = ldl_p(buf);
2201 return MEMTX_OK;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002202 case 8:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002203 *data = ldq_p(buf);
2204 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002205 default:
2206 abort();
2207 }
blueswir1db7b5422007-05-26 17:36:03 +00002208}
2209
Peter Maydellf25a49e2015-04-26 16:49:24 +01002210static MemTxResult subpage_write(void *opaque, hwaddr addr,
2211 uint64_t value, unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002212{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002213 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002214 uint8_t buf[8];
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002215
blueswir1db7b5422007-05-26 17:36:03 +00002216#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002217 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002218 " value %"PRIx64"\n",
2219 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00002220#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002221 switch (len) {
2222 case 1:
2223 stb_p(buf, value);
2224 break;
2225 case 2:
2226 stw_p(buf, value);
2227 break;
2228 case 4:
2229 stl_p(buf, value);
2230 break;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002231 case 8:
2232 stq_p(buf, value);
2233 break;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002234 default:
2235 abort();
2236 }
Peter Maydell5c9eb022015-04-26 16:49:24 +01002237 return address_space_write(subpage->as, addr + subpage->base,
2238 attrs, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00002239}
2240
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002241static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08002242 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002243{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002244 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002245#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002246 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002247 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002248#endif
2249
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002250 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08002251 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002252}
2253
Avi Kivity70c68e42012-01-02 12:32:48 +02002254static const MemoryRegionOps subpage_ops = {
Peter Maydellf25a49e2015-04-26 16:49:24 +01002255 .read_with_attrs = subpage_read,
2256 .write_with_attrs = subpage_write,
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002257 .impl.min_access_size = 1,
2258 .impl.max_access_size = 8,
2259 .valid.min_access_size = 1,
2260 .valid.max_access_size = 8,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002261 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02002262 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00002263};
2264
Anthony Liguoric227f092009-10-01 16:12:16 -05002265static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002266 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00002267{
2268 int idx, eidx;
2269
2270 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2271 return -1;
2272 idx = SUBPAGE_IDX(start);
2273 eidx = SUBPAGE_IDX(end);
2274#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002275 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2276 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00002277#endif
blueswir1db7b5422007-05-26 17:36:03 +00002278 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02002279 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00002280 }
2281
2282 return 0;
2283}
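/* Example (illustrative, assuming SUBPAGE_IDX indexes per byte within
 * the page): registering [0x100, 0x1ff] points every sub_section slot
 * in that byte range at the same section number, so any access in the
 * range dispatches to one MemoryRegionSection.
 */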
2284
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002285static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00002286{
Anthony Liguoric227f092009-10-01 16:12:16 -05002287 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00002288
Anthony Liguori7267c092011-08-20 22:09:37 -05002289 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00002290
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002291 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00002292 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002293 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002294 NULL, TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02002295 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00002296#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002297 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2298 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00002299#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02002300 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00002301
2302 return mmio;
2303}
2304
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002305static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2306 MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02002307{
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002308 assert(as);
Avi Kivity5312bd82012-02-12 18:32:55 +02002309 MemoryRegionSection section = {
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002310 .address_space = as,
Avi Kivity5312bd82012-02-12 18:32:55 +02002311 .mr = mr,
2312 .offset_within_address_space = 0,
2313 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002314 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02002315 };
2316
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002317 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02002318}
2319
Peter Maydella54c87b2016-01-21 14:15:05 +00002320MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
Avi Kivityaa102232012-03-08 17:06:55 +02002321{
Peter Maydella54c87b2016-01-21 14:15:05 +00002322 int asidx = cpu_asidx_from_attrs(cpu, attrs);
2323 CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
Peter Maydell32857f42015-10-01 15:29:50 +01002324 AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002325 MemoryRegionSection *sections = d->map.sections;
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002326
2327 return sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02002328}
2329
Avi Kivitye9179ce2009-06-14 11:38:52 +03002330static void io_mem_init(void)
2331{
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002332 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002333 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002334 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002335 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002336 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002337 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002338 NULL, UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03002339}
2340
Avi Kivityac1970f2012-10-03 16:22:53 +02002341static void mem_begin(MemoryListener *listener)
2342{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002343 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002344 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2345 uint16_t n;
2346
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002347 n = dummy_section(&d->map, as, &io_mem_unassigned);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002348 assert(n == PHYS_SECTION_UNASSIGNED);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002349 n = dummy_section(&d->map, as, &io_mem_notdirty);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002350 assert(n == PHYS_SECTION_NOTDIRTY);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002351 n = dummy_section(&d->map, as, &io_mem_rom);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002352 assert(n == PHYS_SECTION_ROM);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002353 n = dummy_section(&d->map, as, &io_mem_watch);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002354 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02002355
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02002356 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02002357 d->as = as;
2358 as->next_dispatch = d;
2359}
2360
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002361static void address_space_dispatch_free(AddressSpaceDispatch *d)
2362{
2363 phys_sections_free(&d->map);
2364 g_free(d);
2365}
2366
Paolo Bonzini00752702013-05-29 12:13:54 +02002367static void mem_commit(MemoryListener *listener)
2368{
2369 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02002370 AddressSpaceDispatch *cur = as->dispatch;
2371 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02002372
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002373 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02002374
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002375 atomic_rcu_set(&as->dispatch, next);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002376 if (cur) {
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002377 call_rcu(cur, address_space_dispatch_free, rcu);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002378 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02002379}
2380
Avi Kivity1d711482012-10-02 18:54:45 +02002381static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02002382{
Peter Maydell32857f42015-10-01 15:29:50 +01002383 CPUAddressSpace *cpuas;
2384 AddressSpaceDispatch *d;
Avi Kivity117712c2012-02-12 21:23:17 +02002385
2386 /* Since each CPU stores RAM addresses in its TLB cache, we must
2387 reset the modified entries. */
Peter Maydell32857f42015-10-01 15:29:50 +01002388 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
2389 cpu_reloading_memory_map();
2390 /* The CPU and TLB are protected by the iothread lock.
2391 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2392 * may have split the RCU critical section.
2393 */
2394 d = atomic_rcu_read(&cpuas->as->dispatch);
2395 cpuas->memory_dispatch = d;
2396 tlb_flush(cpuas->cpu, 1);
Avi Kivity50c1e142012-02-08 21:36:02 +02002397}
2398
Avi Kivityac1970f2012-10-03 16:22:53 +02002399void address_space_init_dispatch(AddressSpace *as)
2400{
Paolo Bonzini00752702013-05-29 12:13:54 +02002401 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002402 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002403 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02002404 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02002405 .region_add = mem_add,
2406 .region_nop = mem_add,
2407 .priority = 0,
2408 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002409 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02002410}
2411
Paolo Bonzini6e48e8f2015-02-10 10:25:44 -07002412void address_space_unregister(AddressSpace *as)
2413{
2414 memory_listener_unregister(&as->dispatch_listener);
2415}
2416
Avi Kivity83f3c252012-10-07 12:59:55 +02002417void address_space_destroy_dispatch(AddressSpace *as)
2418{
2419 AddressSpaceDispatch *d = as->dispatch;
2420
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002421 atomic_rcu_set(&as->dispatch, NULL);
2422 if (d) {
2423 call_rcu(d, address_space_dispatch_free, rcu);
2424 }
Avi Kivity83f3c252012-10-07 12:59:55 +02002425}
2426
Avi Kivity62152b82011-07-26 14:26:14 +03002427static void memory_map_init(void)
2428{
Anthony Liguori7267c092011-08-20 22:09:37 -05002429 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01002430
Paolo Bonzini57271d62013-11-07 17:14:37 +01002431 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002432 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03002433
Anthony Liguori7267c092011-08-20 22:09:37 -05002434 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02002435 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2436 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002437 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity62152b82011-07-26 14:26:14 +03002438}
2439
2440MemoryRegion *get_system_memory(void)
2441{
2442 return system_memory;
2443}
2444
Avi Kivity309cb472011-08-08 16:09:03 +03002445MemoryRegion *get_system_io(void)
2446{
2447 return system_io;
2448}
2449
pbrooke2eef172008-06-08 01:09:01 +00002450#endif /* !defined(CONFIG_USER_ONLY) */
2451
bellard13eb76e2004-01-24 15:23:36 +00002452/* physical memory access (slow version, mainly for debug) */
2453#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02002454int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00002455 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002456{
2457 int l, flags;
2458 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00002459 void * p;
bellard13eb76e2004-01-24 15:23:36 +00002460
2461 while (len > 0) {
2462 page = addr & TARGET_PAGE_MASK;
2463 l = (page + TARGET_PAGE_SIZE) - addr;
2464 if (l > len)
2465 l = len;
2466 flags = page_get_flags(page);
2467 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00002468 return -1;
bellard13eb76e2004-01-24 15:23:36 +00002469 if (is_write) {
2470 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00002471 return -1;
bellard579a97f2007-11-11 14:26:47 +00002472 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002473 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00002474 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002475 memcpy(p, buf, l);
2476 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00002477 } else {
2478 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00002479 return -1;
bellard579a97f2007-11-11 14:26:47 +00002480 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002481 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00002482 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002483 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00002484 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00002485 }
2486 len -= l;
2487 buf += l;
2488 addr += l;
2489 }
Paul Brooka68fe892010-03-01 00:08:59 +00002490 return 0;
bellard13eb76e2004-01-24 15:23:36 +00002491}
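/*
 * Illustrative sketch (not part of the original file): a debugger stub
 * reading guest memory through this slow path.  'cpu' is assumed to be a
 * valid vCPU handle supplied by the caller.
 */
static G_GNUC_UNUSED int example_peek_guest(CPUState *cpu, target_ulong addr,
                                            uint8_t *out, int len)
{
    /* is_write == 0: copy from guest memory into 'out' */
    return cpu_memory_rw_debug(cpu, addr, out, len, 0);
}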
bellard8df1cd02005-01-28 22:37:22 +00002492
bellard13eb76e2004-01-24 15:23:36 +00002493#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002494
Paolo Bonzini845b6212015-03-23 11:45:53 +01002495static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002496 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002497{
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002498 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2499 /* No early return if dirty_log_mask is or becomes 0, because
2500 * cpu_physical_memory_set_dirty_range will still call
2501 * xen_modified_memory.
2502 */
2503 if (dirty_log_mask) {
2504 dirty_log_mask =
2505 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002506 }
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002507 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2508 tb_invalidate_phys_range(addr, addr + length);
2509 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2510 }
2511 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002512}
2513
Richard Henderson23326162013-07-08 14:55:59 -07002514static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02002515{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02002516 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07002517
2518 /* Regions are assumed to support 1-4 byte accesses unless
2519 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07002520 if (access_size_max == 0) {
2521 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002522 }
Richard Henderson23326162013-07-08 14:55:59 -07002523
2524 /* Bound the maximum access by the alignment of the address. */
2525 if (!mr->ops->impl.unaligned) {
2526 unsigned align_size_max = addr & -addr;
2527 if (align_size_max != 0 && align_size_max < access_size_max) {
2528 access_size_max = align_size_max;
2529 }
2530 }
2531
2532 /* Don't attempt accesses larger than the maximum. */
2533 if (l > access_size_max) {
2534 l = access_size_max;
2535 }
Peter Maydell6554f5c2015-07-24 13:33:10 +01002536 l = pow2floor(l);
Richard Henderson23326162013-07-08 14:55:59 -07002537
2538 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002539}
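/*
 * Worked example (illustrative, not in the original file): for a request of
 * l = 8 bytes at addr = 0x1006 on a region without impl.unaligned,
 * addr & -addr == 2 (the lowest set bit of the address), so access_size_max
 * drops to 2 and pow2floor() leaves l = 2.  The dispatch loop then issues a
 * 2-byte access and iterates for the remainder of the buffer.
 */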
2540
Jan Kiszka4840f102015-06-18 18:47:22 +02002541static bool prepare_mmio_access(MemoryRegion *mr)
Paolo Bonzini125b3802015-06-18 18:47:21 +02002542{
Jan Kiszka4840f102015-06-18 18:47:22 +02002543 bool unlocked = !qemu_mutex_iothread_locked();
2544 bool release_lock = false;
2545
2546 if (unlocked && mr->global_locking) {
2547 qemu_mutex_lock_iothread();
2548 unlocked = false;
2549 release_lock = true;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002550 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002551 if (mr->flush_coalesced_mmio) {
2552 if (unlocked) {
2553 qemu_mutex_lock_iothread();
2554 }
2555 qemu_flush_coalesced_mmio_buffer();
2556 if (unlocked) {
2557 qemu_mutex_unlock_iothread();
2558 }
2559 }
2560
2561 return release_lock;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002562}
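/*
 * Illustrative sketch (not part of the original file): how the dispatch
 * loops below pair prepare_mmio_access() with a deferred unlock, so the
 * iothread lock is only taken for regions that actually require it.
 */
static G_GNUC_UNUSED MemTxResult example_locked_mmio_read(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *val,
                                                          MemTxAttrs attrs)
{
    bool release_lock = prepare_mmio_access(mr);
    MemTxResult r = memory_region_dispatch_read(mr, addr, val, 4, attrs);

    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    return r;
}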
2563
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002564/* Called within RCU critical section. */
2565static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
2566 MemTxAttrs attrs,
2567 const uint8_t *buf,
2568 int len, hwaddr addr1,
2569 hwaddr l, MemoryRegion *mr)
bellard13eb76e2004-01-24 15:23:36 +00002570{
bellard13eb76e2004-01-24 15:23:36 +00002571 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002572 uint64_t val;
Peter Maydell3b643492015-04-26 16:49:23 +01002573 MemTxResult result = MEMTX_OK;
Jan Kiszka4840f102015-06-18 18:47:22 +02002574 bool release_lock = false;
ths3b46e622007-09-17 08:09:54 +00002575
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002576 for (;;) {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002577 if (!memory_access_is_direct(mr, true)) {
2578 release_lock |= prepare_mmio_access(mr);
2579 l = memory_access_size(mr, l, addr1);
2580 /* XXX: could force current_cpu to NULL to avoid
2581 potential bugs */
2582 switch (l) {
2583 case 8:
2584 /* 64 bit write access */
2585 val = ldq_p(buf);
2586 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2587 attrs);
2588 break;
2589 case 4:
2590 /* 32 bit write access */
2591 val = ldl_p(buf);
2592 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2593 attrs);
2594 break;
2595 case 2:
2596 /* 16 bit write access */
2597 val = lduw_p(buf);
2598 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2599 attrs);
2600 break;
2601 case 1:
2602 /* 8 bit write access */
2603 val = ldub_p(buf);
2604 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2605 attrs);
2606 break;
2607 default:
2608 abort();
bellard13eb76e2004-01-24 15:23:36 +00002609 }
2610 } else {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002611 addr1 += memory_region_get_ram_addr(mr);
2612 /* RAM case */
Gonglei3655cb92016-02-20 10:35:20 +08002613 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002614 memcpy(ptr, buf, l);
2615 invalidate_and_set_dirty(mr, addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002616 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002617
2618 if (release_lock) {
2619 qemu_mutex_unlock_iothread();
2620 release_lock = false;
2621 }
2622
bellard13eb76e2004-01-24 15:23:36 +00002623 len -= l;
2624 buf += l;
2625 addr += l;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002626
2627 if (!len) {
2628 break;
2629 }
2630
2631 l = len;
2632 mr = address_space_translate(as, addr, &addr1, &l, true);
bellard13eb76e2004-01-24 15:23:36 +00002633 }
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002634
Peter Maydell3b643492015-04-26 16:49:23 +01002635 return result;
bellard13eb76e2004-01-24 15:23:36 +00002636}
bellard8df1cd02005-01-28 22:37:22 +00002637
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002638MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2639 const uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002640{
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002641 hwaddr l;
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002642 hwaddr addr1;
2643 MemoryRegion *mr;
2644 MemTxResult result = MEMTX_OK;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002645
2646 if (len > 0) {
2647 rcu_read_lock();
2648 l = len;
2649 mr = address_space_translate(as, addr, &addr1, &l, true);
2650 result = address_space_write_continue(as, addr, attrs, buf, len,
2651 addr1, l, mr);
2652 rcu_read_unlock();
2653 }
2654
2655 return result;
2656}
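/*
 * Illustrative example (not part of the original file): pushing a small
 * buffer into guest memory and checking the transaction result.  The
 * address and payload are made up.
 */
static G_GNUC_UNUSED bool example_write_guest_buffer(AddressSpace *as)
{
    const uint8_t payload[4] = { 0xde, 0xad, 0xbe, 0xef };
    MemTxResult res = address_space_write(as, 0x1000, MEMTXATTRS_UNSPECIFIED,
                                          payload, sizeof(payload));

    /* MEMTX_OK means no device reported a decode or transaction error. */
    return res == MEMTX_OK;
}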
2657
2658/* Called within RCU critical section. */
2659MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
2660 MemTxAttrs attrs, uint8_t *buf,
2661 int len, hwaddr addr1, hwaddr l,
2662 MemoryRegion *mr)
2663{
2664 uint8_t *ptr;
2665 uint64_t val;
2666 MemTxResult result = MEMTX_OK;
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002667 bool release_lock = false;
2668
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002669 for (;;) {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002670 if (!memory_access_is_direct(mr, false)) {
2671 /* I/O case */
2672 release_lock |= prepare_mmio_access(mr);
2673 l = memory_access_size(mr, l, addr1);
2674 switch (l) {
2675 case 8:
2676 /* 64 bit read access */
2677 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2678 attrs);
2679 stq_p(buf, val);
2680 break;
2681 case 4:
2682 /* 32 bit read access */
2683 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2684 attrs);
2685 stl_p(buf, val);
2686 break;
2687 case 2:
2688 /* 16 bit read access */
2689 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2690 attrs);
2691 stw_p(buf, val);
2692 break;
2693 case 1:
2694 /* 8 bit read access */
2695 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2696 attrs);
2697 stb_p(buf, val);
2698 break;
2699 default:
2700 abort();
2701 }
2702 } else {
2703 /* RAM case */
Fam Zheng8e41fb62016-03-01 14:18:21 +08002704 ptr = qemu_get_ram_ptr(mr->ram_block,
2705 memory_region_get_ram_addr(mr) + addr1);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002706 memcpy(buf, ptr, l);
2707 }
2708
2709 if (release_lock) {
2710 qemu_mutex_unlock_iothread();
2711 release_lock = false;
2712 }
2713
2714 len -= l;
2715 buf += l;
2716 addr += l;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002717
2718 if (!len) {
2719 break;
2720 }
2721
2722 l = len;
2723 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002724 }
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002725
2726 return result;
2727}
2728
Paolo Bonzini3cc8f882015-12-09 10:34:13 +01002729MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
2730 MemTxAttrs attrs, uint8_t *buf, int len)
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002731{
2732 hwaddr l;
2733 hwaddr addr1;
2734 MemoryRegion *mr;
2735 MemTxResult result = MEMTX_OK;
2736
2737 if (len > 0) {
2738 rcu_read_lock();
2739 l = len;
2740 mr = address_space_translate(as, addr, &addr1, &l, false);
2741 result = address_space_read_continue(as, addr, attrs, buf, len,
2742 addr1, l, mr);
2743 rcu_read_unlock();
2744 }
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002745
2746 return result;
Avi Kivityac1970f2012-10-03 16:22:53 +02002747}
2748
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002749MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2750 uint8_t *buf, int len, bool is_write)
2751{
2752 if (is_write) {
2753 return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
2754 } else {
2755 return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
2756 }
2757}
Avi Kivityac1970f2012-10-03 16:22:53 +02002758
Avi Kivitya8170e52012-10-23 12:30:10 +02002759void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002760 int len, int is_write)
2761{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002762 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2763 buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002764}
2765
Alexander Graf582b55a2013-12-11 14:17:44 +01002766enum write_rom_type {
2767 WRITE_DATA,
2768 FLUSH_CACHE,
2769};
2770
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002771static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
Alexander Graf582b55a2013-12-11 14:17:44 +01002772 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
bellardd0ecd2a2006-04-23 17:14:48 +00002773{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002774 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002775 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002776 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002777 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002778
Paolo Bonzini41063e12015-03-18 14:21:43 +01002779 rcu_read_lock();
bellardd0ecd2a2006-04-23 17:14:48 +00002780 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002781 l = len;
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002782 mr = address_space_translate(as, addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002783
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002784 if (!(memory_region_is_ram(mr) ||
2785 memory_region_is_romd(mr))) {
Paolo Bonzinib242e0e2015-07-04 00:24:51 +02002786 l = memory_access_size(mr, l, addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002787 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002788 addr1 += memory_region_get_ram_addr(mr);
bellardd0ecd2a2006-04-23 17:14:48 +00002789 /* ROM/RAM case */
Gonglei3655cb92016-02-20 10:35:20 +08002790 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
Alexander Graf582b55a2013-12-11 14:17:44 +01002791 switch (type) {
2792 case WRITE_DATA:
2793 memcpy(ptr, buf, l);
Paolo Bonzini845b6212015-03-23 11:45:53 +01002794 invalidate_and_set_dirty(mr, addr1, l);
Alexander Graf582b55a2013-12-11 14:17:44 +01002795 break;
2796 case FLUSH_CACHE:
2797 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2798 break;
2799 }
bellardd0ecd2a2006-04-23 17:14:48 +00002800 }
2801 len -= l;
2802 buf += l;
2803 addr += l;
2804 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002805 rcu_read_unlock();
bellardd0ecd2a2006-04-23 17:14:48 +00002806}
2807
Alexander Graf582b55a2013-12-11 14:17:44 +01002808/* Used for ROM loading: can write in both RAM and ROM. */
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002809void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
Alexander Graf582b55a2013-12-11 14:17:44 +01002810 const uint8_t *buf, int len)
2811{
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002812 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
Alexander Graf582b55a2013-12-11 14:17:44 +01002813}
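/*
 * Illustrative example (not part of the original file): a firmware loader
 * copying a blob into a ROM region, which a plain address_space_write()
 * would leave untouched.  The load address is made up.
 */
static G_GNUC_UNUSED void example_load_firmware(const uint8_t *blob, int size)
{
    cpu_physical_memory_write_rom(&address_space_memory, 0xfffc0000,
                                  blob, size);
}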
2814
2815void cpu_flush_icache_range(hwaddr start, int len)
2816{
2817 /*
2818 * This function should do the same thing as an icache flush that was
2819 * triggered from within the guest. For TCG we are always cache coherent,
2820 * so there is no need to flush anything. For KVM / Xen we need to flush
2821 * the host's instruction cache at least.
2822 */
2823 if (tcg_enabled()) {
2824 return;
2825 }
2826
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002827 cpu_physical_memory_write_rom_internal(&address_space_memory,
2828 start, NULL, len, FLUSH_CACHE);
Alexander Graf582b55a2013-12-11 14:17:44 +01002829}
2830
aliguori6d16c2f2009-01-22 16:59:11 +00002831typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002832 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002833 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002834 hwaddr addr;
2835 hwaddr len;
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002836 bool in_use;
aliguori6d16c2f2009-01-22 16:59:11 +00002837} BounceBuffer;
2838
2839static BounceBuffer bounce;
2840
aliguoriba223c22009-01-22 16:59:16 +00002841typedef struct MapClient {
Fam Zhenge95205e2015-03-16 17:03:37 +08002842 QEMUBH *bh;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002843 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002844} MapClient;
2845
Fam Zheng38e047b2015-03-16 17:03:35 +08002846QemuMutex map_client_list_lock;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002847static QLIST_HEAD(map_client_list, MapClient) map_client_list
2848 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002849
Fam Zhenge95205e2015-03-16 17:03:37 +08002850static void cpu_unregister_map_client_do(MapClient *client)
aliguoriba223c22009-01-22 16:59:16 +00002851{
Blue Swirl72cf2d42009-09-12 07:36:22 +00002852 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002853 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002854}
2855
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002856static void cpu_notify_map_clients_locked(void)
aliguoriba223c22009-01-22 16:59:16 +00002857{
2858 MapClient *client;
2859
Blue Swirl72cf2d42009-09-12 07:36:22 +00002860 while (!QLIST_EMPTY(&map_client_list)) {
2861 client = QLIST_FIRST(&map_client_list);
Fam Zhenge95205e2015-03-16 17:03:37 +08002862 qemu_bh_schedule(client->bh);
2863 cpu_unregister_map_client_do(client);
aliguoriba223c22009-01-22 16:59:16 +00002864 }
2865}
2866
Fam Zhenge95205e2015-03-16 17:03:37 +08002867void cpu_register_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002868{
2869 MapClient *client = g_malloc(sizeof(*client));
2870
Fam Zheng38e047b2015-03-16 17:03:35 +08002871 qemu_mutex_lock(&map_client_list_lock);
Fam Zhenge95205e2015-03-16 17:03:37 +08002872 client->bh = bh;
bellardd0ecd2a2006-04-23 17:14:48 +00002873 QLIST_INSERT_HEAD(&map_client_list, client, link);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002874 if (!atomic_read(&bounce.in_use)) {
2875 cpu_notify_map_clients_locked();
2876 }
Fam Zheng38e047b2015-03-16 17:03:35 +08002877 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002878}
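/*
 * Illustrative sketch (not part of the original file): a DMA user whose
 * address_space_map() call failed because the bounce buffer was in use can
 * register a bottom half to retry once the buffer is released.  The
 * 'retry_cb' callback and its opaque pointer are hypothetical.
 */
static G_GNUC_UNUSED void example_retry_when_bounce_free(QEMUBHFunc *retry_cb,
                                                         void *opaque)
{
    QEMUBH *bh = qemu_bh_new(retry_cb, opaque);

    /* The bottom half is scheduled from cpu_notify_map_clients(). */
    cpu_register_map_client(bh);
}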
2879
Fam Zheng38e047b2015-03-16 17:03:35 +08002880void cpu_exec_init_all(void)
2881{
2882 qemu_mutex_init(&ram_list.mutex);
Fam Zheng38e047b2015-03-16 17:03:35 +08002883 io_mem_init();
Paolo Bonzini680a4782015-11-02 09:23:52 +01002884 memory_map_init();
Fam Zheng38e047b2015-03-16 17:03:35 +08002885 qemu_mutex_init(&map_client_list_lock);
2886}
2887
Fam Zhenge95205e2015-03-16 17:03:37 +08002888void cpu_unregister_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002889{
Fam Zhenge95205e2015-03-16 17:03:37 +08002890 MapClient *client;
bellardd0ecd2a2006-04-23 17:14:48 +00002891
Fam Zhenge95205e2015-03-16 17:03:37 +08002892 qemu_mutex_lock(&map_client_list_lock);
2893 QLIST_FOREACH(client, &map_client_list, link) {
2894 if (client->bh == bh) {
2895 cpu_unregister_map_client_do(client);
2896 break;
2897 }
2898 }
2899 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002900}
2901
2902static void cpu_notify_map_clients(void)
2903{
Fam Zheng38e047b2015-03-16 17:03:35 +08002904 qemu_mutex_lock(&map_client_list_lock);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002905 cpu_notify_map_clients_locked();
Fam Zheng38e047b2015-03-16 17:03:35 +08002906 qemu_mutex_unlock(&map_client_list_lock);
aliguori6d16c2f2009-01-22 16:59:11 +00002907}
2908
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002909bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2910{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002911 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002912 hwaddr l, xlat;
2913
Paolo Bonzini41063e12015-03-18 14:21:43 +01002914 rcu_read_lock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002915 while (len > 0) {
2916 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002917 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2918 if (!memory_access_is_direct(mr, is_write)) {
2919 l = memory_access_size(mr, l, addr);
2920 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002921 return false;
2922 }
2923 }
2924
2925 len -= l;
2926 addr += l;
2927 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002928 rcu_read_unlock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002929 return true;
2930}
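/*
 * Illustrative example (not part of the original file): probing whether a
 * 512-byte DMA write could succeed before attempting it.  The address is
 * made up.
 */
static G_GNUC_UNUSED bool example_can_dma(AddressSpace *as)
{
    return address_space_access_valid(as, 0x2000, 512, true);
}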
2931
aliguori6d16c2f2009-01-22 16:59:11 +00002932/* Map a physical memory region into a host virtual address.
2933 * May map a subset of the requested range, given by and returned in *plen.
2934 * May return NULL if resources needed to perform the mapping are exhausted.
2935 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002936 * Use cpu_register_map_client() to know when retrying the map operation is
2937 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002938 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002939void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002940 hwaddr addr,
2941 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002942 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002943{
Avi Kivitya8170e52012-10-23 12:30:10 +02002944 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002945 hwaddr done = 0;
2946 hwaddr l, xlat, base;
2947 MemoryRegion *mr, *this_mr;
2948 ram_addr_t raddr;
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01002949 void *ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00002950
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002951 if (len == 0) {
2952 return NULL;
2953 }
aliguori6d16c2f2009-01-22 16:59:11 +00002954
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002955 l = len;
Paolo Bonzini41063e12015-03-18 14:21:43 +01002956 rcu_read_lock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002957 mr = address_space_translate(as, addr, &xlat, &l, is_write);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002958
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002959 if (!memory_access_is_direct(mr, is_write)) {
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002960 if (atomic_xchg(&bounce.in_use, true)) {
Paolo Bonzini41063e12015-03-18 14:21:43 +01002961 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002962 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002963 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002964 /* Avoid unbounded allocations */
2965 l = MIN(l, TARGET_PAGE_SIZE);
2966 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002967 bounce.addr = addr;
2968 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002969
2970 memory_region_ref(mr);
2971 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002972 if (!is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002973 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2974 bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002975 }
aliguori6d16c2f2009-01-22 16:59:11 +00002976
Paolo Bonzini41063e12015-03-18 14:21:43 +01002977 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002978 *plen = l;
2979 return bounce.buffer;
2980 }
2981
2982 base = xlat;
2983 raddr = memory_region_get_ram_addr(mr);
2984
2985 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00002986 len -= l;
2987 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002988 done += l;
2989 if (len == 0) {
2990 break;
2991 }
2992
2993 l = len;
2994 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2995 if (this_mr != mr || xlat != base + done) {
2996 break;
2997 }
aliguori6d16c2f2009-01-22 16:59:11 +00002998 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002999
Paolo Bonzinid3e71552013-06-28 17:33:29 +02003000 memory_region_ref(mr);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02003001 *plen = done;
Gonglei3655cb92016-02-20 10:35:20 +08003002 ptr = qemu_ram_ptr_length(mr->ram_block, raddr + base, plen);
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01003003 rcu_read_unlock();
3004
3005 return ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00003006}
3007
Avi Kivityac1970f2012-10-03 16:22:53 +02003008/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00003009 * Will also mark the memory as dirty if is_write == 1. access_len gives
3010 * the amount of memory that was actually read or written by the caller.
3011 */
Avi Kivitya8170e52012-10-23 12:30:10 +02003012void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
3013 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00003014{
3015 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02003016 MemoryRegion *mr;
3017 ram_addr_t addr1;
3018
3019 mr = qemu_ram_addr_from_host(buffer, &addr1);
3020 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00003021 if (is_write) {
Paolo Bonzini845b6212015-03-23 11:45:53 +01003022 invalidate_and_set_dirty(mr, addr1, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00003023 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003024 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003025 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003026 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02003027 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00003028 return;
3029 }
3030 if (is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01003031 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
3032 bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00003033 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00003034 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003035 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02003036 memory_region_unref(bounce.mr);
Fam Zhengc2cba0f2015-03-16 17:03:33 +08003037 atomic_mb_set(&bounce.in_use, false);
aliguoriba223c22009-01-22 16:59:16 +00003038 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00003039}
bellardd0ecd2a2006-04-23 17:14:48 +00003040
Avi Kivitya8170e52012-10-23 12:30:10 +02003041void *cpu_physical_memory_map(hwaddr addr,
3042 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02003043 int is_write)
3044{
3045 return address_space_map(&address_space_memory, addr, plen, is_write);
3046}
3047
Avi Kivitya8170e52012-10-23 12:30:10 +02003048void cpu_physical_memory_unmap(void *buffer, hwaddr len,
3049 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02003050{
3051 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
3052}
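/*
 * Illustrative sketch (not part of the original file): the canonical
 * zero-copy pattern around address_space_map()/address_space_unmap().  A
 * real caller would fall back to address_space_rw(), or retry via
 * cpu_register_map_client(), when the mapping comes back NULL or short.
 */
static G_GNUC_UNUSED void example_zero_copy_fill(AddressSpace *as,
                                                 hwaddr addr, hwaddr len)
{
    hwaddr plen = len;
    void *host = address_space_map(as, addr, &plen, true);

    if (host) {
        memset(host, 0, plen);   /* touch only the bytes actually mapped */
        address_space_unmap(as, host, plen, true, plen);
    }
}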
3053
bellard8df1cd02005-01-28 22:37:22 +00003054/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003055static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
3056 MemTxAttrs attrs,
3057 MemTxResult *result,
3058 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003059{
bellard8df1cd02005-01-28 22:37:22 +00003060 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02003061 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003062 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003063 hwaddr l = 4;
3064 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003065 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003066 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003067
Paolo Bonzini41063e12015-03-18 14:21:43 +01003068 rcu_read_lock();
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003069 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003070 if (l < 4 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003071 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003072
bellard8df1cd02005-01-28 22:37:22 +00003073 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003074 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003075#if defined(TARGET_WORDS_BIGENDIAN)
3076 if (endian == DEVICE_LITTLE_ENDIAN) {
3077 val = bswap32(val);
3078 }
3079#else
3080 if (endian == DEVICE_BIG_ENDIAN) {
3081 val = bswap32(val);
3082 }
3083#endif
bellard8df1cd02005-01-28 22:37:22 +00003084 } else {
3085 /* RAM case */
Gonglei3655cb92016-02-20 10:35:20 +08003086 ptr = qemu_get_ram_ptr(mr->ram_block,
3087 (memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003088 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003089 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003090 switch (endian) {
3091 case DEVICE_LITTLE_ENDIAN:
3092 val = ldl_le_p(ptr);
3093 break;
3094 case DEVICE_BIG_ENDIAN:
3095 val = ldl_be_p(ptr);
3096 break;
3097 default:
3098 val = ldl_p(ptr);
3099 break;
3100 }
Peter Maydell50013112015-04-26 16:49:24 +01003101 r = MEMTX_OK;
3102 }
3103 if (result) {
3104 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003105 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003106 if (release_lock) {
3107 qemu_mutex_unlock_iothread();
3108 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003109 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003110 return val;
3111}
3112
Peter Maydell50013112015-04-26 16:49:24 +01003113uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
3114 MemTxAttrs attrs, MemTxResult *result)
3115{
3116 return address_space_ldl_internal(as, addr, attrs, result,
3117 DEVICE_NATIVE_ENDIAN);
3118}
3119
3120uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
3121 MemTxAttrs attrs, MemTxResult *result)
3122{
3123 return address_space_ldl_internal(as, addr, attrs, result,
3124 DEVICE_LITTLE_ENDIAN);
3125}
3126
3127uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
3128 MemTxAttrs attrs, MemTxResult *result)
3129{
3130 return address_space_ldl_internal(as, addr, attrs, result,
3131 DEVICE_BIG_ENDIAN);
3132}
3133
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003134uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003135{
Peter Maydell50013112015-04-26 16:49:24 +01003136 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003137}
3138
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003139uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003140{
Peter Maydell50013112015-04-26 16:49:24 +01003141 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003142}
3143
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003144uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003145{
Peter Maydell50013112015-04-26 16:49:24 +01003146 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003147}
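/*
 * Illustrative example (not part of the original file): reading a
 * little-endian 32-bit value from guest memory with explicit error
 * checking.  The address is made up.
 */
static G_GNUC_UNUSED uint32_t example_read_le_word(AddressSpace *as)
{
    MemTxResult res;
    uint32_t val = address_space_ldl_le(as, 0x3000,
                                        MEMTXATTRS_UNSPECIFIED, &res);

    return res == MEMTX_OK ? val : 0;
}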
3148
bellard84b7b8e2005-11-28 21:19:04 +00003149/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003150static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
3151 MemTxAttrs attrs,
3152 MemTxResult *result,
3153 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00003154{
bellard84b7b8e2005-11-28 21:19:04 +00003155 uint8_t *ptr;
3156 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003157 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003158 hwaddr l = 8;
3159 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003160 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003161 bool release_lock = false;
bellard84b7b8e2005-11-28 21:19:04 +00003162
Paolo Bonzini41063e12015-03-18 14:21:43 +01003163 rcu_read_lock();
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003164 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003165 false);
3166 if (l < 8 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003167 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003168
bellard84b7b8e2005-11-28 21:19:04 +00003169 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003170 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
Paolo Bonzini968a5622013-05-24 17:58:37 +02003171#if defined(TARGET_WORDS_BIGENDIAN)
3172 if (endian == DEVICE_LITTLE_ENDIAN) {
3173 val = bswap64(val);
3174 }
3175#else
3176 if (endian == DEVICE_BIG_ENDIAN) {
3177 val = bswap64(val);
3178 }
3179#endif
bellard84b7b8e2005-11-28 21:19:04 +00003180 } else {
3181 /* RAM case */
Gonglei3655cb92016-02-20 10:35:20 +08003182 ptr = qemu_get_ram_ptr(mr->ram_block,
3183 (memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003184 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003185 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003186 switch (endian) {
3187 case DEVICE_LITTLE_ENDIAN:
3188 val = ldq_le_p(ptr);
3189 break;
3190 case DEVICE_BIG_ENDIAN:
3191 val = ldq_be_p(ptr);
3192 break;
3193 default:
3194 val = ldq_p(ptr);
3195 break;
3196 }
Peter Maydell50013112015-04-26 16:49:24 +01003197 r = MEMTX_OK;
3198 }
3199 if (result) {
3200 *result = r;
bellard84b7b8e2005-11-28 21:19:04 +00003201 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003202 if (release_lock) {
3203 qemu_mutex_unlock_iothread();
3204 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003205 rcu_read_unlock();
bellard84b7b8e2005-11-28 21:19:04 +00003206 return val;
3207}
3208
Peter Maydell50013112015-04-26 16:49:24 +01003209uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
3210 MemTxAttrs attrs, MemTxResult *result)
3211{
3212 return address_space_ldq_internal(as, addr, attrs, result,
3213 DEVICE_NATIVE_ENDIAN);
3214}
3215
3216uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
3217 MemTxAttrs attrs, MemTxResult *result)
3218{
3219 return address_space_ldq_internal(as, addr, attrs, result,
3220 DEVICE_LITTLE_ENDIAN);
3221}
3222
3223uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
3224 MemTxAttrs attrs, MemTxResult *result)
3225{
3226 return address_space_ldq_internal(as, addr, attrs, result,
3227 DEVICE_BIG_ENDIAN);
3228}
3229
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003230uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003231{
Peter Maydell50013112015-04-26 16:49:24 +01003232 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003233}
3234
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003235uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003236{
Peter Maydell50013112015-04-26 16:49:24 +01003237 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003238}
3239
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003240uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003241{
Peter Maydell50013112015-04-26 16:49:24 +01003242 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003243}
3244
bellardaab33092005-10-30 20:48:42 +00003245/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003246uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
3247 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003248{
3249 uint8_t val;
Peter Maydell50013112015-04-26 16:49:24 +01003250 MemTxResult r;
3251
3252 r = address_space_rw(as, addr, attrs, &val, 1, 0);
3253 if (result) {
3254 *result = r;
3255 }
bellardaab33092005-10-30 20:48:42 +00003256 return val;
3257}
3258
Peter Maydell50013112015-04-26 16:49:24 +01003259uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
3260{
3261 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3262}
3263
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003264/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003265static inline uint32_t address_space_lduw_internal(AddressSpace *as,
3266 hwaddr addr,
3267 MemTxAttrs attrs,
3268 MemTxResult *result,
3269 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003270{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003271 uint8_t *ptr;
3272 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003273 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003274 hwaddr l = 2;
3275 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003276 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003277 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003278
Paolo Bonzini41063e12015-03-18 14:21:43 +01003279 rcu_read_lock();
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003280 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003281 false);
3282 if (l < 2 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003283 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003284
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003285 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003286 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003287#if defined(TARGET_WORDS_BIGENDIAN)
3288 if (endian == DEVICE_LITTLE_ENDIAN) {
3289 val = bswap16(val);
3290 }
3291#else
3292 if (endian == DEVICE_BIG_ENDIAN) {
3293 val = bswap16(val);
3294 }
3295#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003296 } else {
3297 /* RAM case */
Gonglei3655cb92016-02-20 10:35:20 +08003298 ptr = qemu_get_ram_ptr(mr->ram_block,
3299 (memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003300 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003301 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003302 switch (endian) {
3303 case DEVICE_LITTLE_ENDIAN:
3304 val = lduw_le_p(ptr);
3305 break;
3306 case DEVICE_BIG_ENDIAN:
3307 val = lduw_be_p(ptr);
3308 break;
3309 default:
3310 val = lduw_p(ptr);
3311 break;
3312 }
Peter Maydell50013112015-04-26 16:49:24 +01003313 r = MEMTX_OK;
3314 }
3315 if (result) {
3316 *result = r;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003317 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003318 if (release_lock) {
3319 qemu_mutex_unlock_iothread();
3320 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003321 rcu_read_unlock();
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003322 return val;
bellardaab33092005-10-30 20:48:42 +00003323}
3324
Peter Maydell50013112015-04-26 16:49:24 +01003325uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
3326 MemTxAttrs attrs, MemTxResult *result)
3327{
3328 return address_space_lduw_internal(as, addr, attrs, result,
3329 DEVICE_NATIVE_ENDIAN);
3330}
3331
3332uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
3333 MemTxAttrs attrs, MemTxResult *result)
3334{
3335 return address_space_lduw_internal(as, addr, attrs, result,
3336 DEVICE_LITTLE_ENDIAN);
3337}
3338
3339uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
3340 MemTxAttrs attrs, MemTxResult *result)
3341{
3342 return address_space_lduw_internal(as, addr, attrs, result,
3343 DEVICE_BIG_ENDIAN);
3344}
3345
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003346uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003347{
Peter Maydell50013112015-04-26 16:49:24 +01003348 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003349}
3350
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003351uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003352{
Peter Maydell50013112015-04-26 16:49:24 +01003353 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003354}
3355
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003356uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003357{
Peter Maydell50013112015-04-26 16:49:24 +01003358 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003359}
3360
bellard8df1cd02005-01-28 22:37:22 +00003361/* warning: addr must be aligned. The RAM page is not marked as dirty
3362 and the code inside it is not invalidated. This is useful when the dirty
3363 bits are used to track modified PTEs. */
Peter Maydell50013112015-04-26 16:49:24 +01003364void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3365 MemTxAttrs attrs, MemTxResult *result)
bellard8df1cd02005-01-28 22:37:22 +00003366{
bellard8df1cd02005-01-28 22:37:22 +00003367 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003368 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003369 hwaddr l = 4;
3370 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003371 MemTxResult r;
Paolo Bonzini845b6212015-03-23 11:45:53 +01003372 uint8_t dirty_log_mask;
Jan Kiszka4840f102015-06-18 18:47:22 +02003373 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003374
Paolo Bonzini41063e12015-03-18 14:21:43 +01003375 rcu_read_lock();
Edgar E. Iglesias2198a122013-11-28 10:13:41 +01003376 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003377 true);
3378 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003379 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003380
Peter Maydell50013112015-04-26 16:49:24 +01003381 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003382 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003383 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
Gonglei3655cb92016-02-20 10:35:20 +08003384 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
bellard8df1cd02005-01-28 22:37:22 +00003385 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003386
Paolo Bonzini845b6212015-03-23 11:45:53 +01003387 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3388 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
Paolo Bonzini58d27072015-03-23 11:56:01 +01003389 cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
Peter Maydell50013112015-04-26 16:49:24 +01003390 r = MEMTX_OK;
3391 }
3392 if (result) {
3393 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003394 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003395 if (release_lock) {
3396 qemu_mutex_unlock_iothread();
3397 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003398 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003399}
3400
Peter Maydell50013112015-04-26 16:49:24 +01003401void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3402{
3403 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3404}
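/*
 * Illustrative sketch (not part of the original file): target MMU code
 * typically uses the _notdirty store when setting accessed/dirty bits in a
 * guest page table entry, so the update neither marks the page dirty nor
 * invalidates translated code.  The PTE address and the 0x20 bit position
 * are hypothetical.
 */
static G_GNUC_UNUSED void example_set_pte_accessed(AddressSpace *as,
                                                   hwaddr pte_addr)
{
    uint32_t pte = ldl_phys(as, pte_addr);

    stl_phys_notdirty(as, pte_addr, pte | 0x20);   /* hypothetical A bit */
}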
3405
bellard8df1cd02005-01-28 22:37:22 +00003406/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003407static inline void address_space_stl_internal(AddressSpace *as,
3408 hwaddr addr, uint32_t val,
3409 MemTxAttrs attrs,
3410 MemTxResult *result,
3411 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003412{
bellard8df1cd02005-01-28 22:37:22 +00003413 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003414 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003415 hwaddr l = 4;
3416 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003417 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003418 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003419
Paolo Bonzini41063e12015-03-18 14:21:43 +01003420 rcu_read_lock();
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003421 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003422 true);
3423 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003424 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003425
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003426#if defined(TARGET_WORDS_BIGENDIAN)
3427 if (endian == DEVICE_LITTLE_ENDIAN) {
3428 val = bswap32(val);
3429 }
3430#else
3431 if (endian == DEVICE_BIG_ENDIAN) {
3432 val = bswap32(val);
3433 }
3434#endif
Peter Maydell50013112015-04-26 16:49:24 +01003435 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003436 } else {
bellard8df1cd02005-01-28 22:37:22 +00003437 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003438 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
Gonglei3655cb92016-02-20 10:35:20 +08003439 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003440 switch (endian) {
3441 case DEVICE_LITTLE_ENDIAN:
3442 stl_le_p(ptr, val);
3443 break;
3444 case DEVICE_BIG_ENDIAN:
3445 stl_be_p(ptr, val);
3446 break;
3447 default:
3448 stl_p(ptr, val);
3449 break;
3450 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003451 invalidate_and_set_dirty(mr, addr1, 4);
Peter Maydell50013112015-04-26 16:49:24 +01003452 r = MEMTX_OK;
bellard8df1cd02005-01-28 22:37:22 +00003453 }
Peter Maydell50013112015-04-26 16:49:24 +01003454 if (result) {
3455 *result = r;
3456 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003457 if (release_lock) {
3458 qemu_mutex_unlock_iothread();
3459 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003460 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003461}
3462
3463void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3464 MemTxAttrs attrs, MemTxResult *result)
3465{
3466 address_space_stl_internal(as, addr, val, attrs, result,
3467 DEVICE_NATIVE_ENDIAN);
3468}
3469
3470void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3471 MemTxAttrs attrs, MemTxResult *result)
3472{
3473 address_space_stl_internal(as, addr, val, attrs, result,
3474 DEVICE_LITTLE_ENDIAN);
3475}
3476
3477void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3478 MemTxAttrs attrs, MemTxResult *result)
3479{
3480 address_space_stl_internal(as, addr, val, attrs, result,
3481 DEVICE_BIG_ENDIAN);
bellard8df1cd02005-01-28 22:37:22 +00003482}
3483
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003484void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003485{
Peter Maydell50013112015-04-26 16:49:24 +01003486 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003487}
3488
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003489void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003490{
Peter Maydell50013112015-04-26 16:49:24 +01003491 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003492}
3493
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003494void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003495{
Peter Maydell50013112015-04-26 16:49:24 +01003496 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003497}
3498
bellardaab33092005-10-30 20:48:42 +00003499/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003500void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3501 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003502{
3503 uint8_t v = val;
Peter Maydell50013112015-04-26 16:49:24 +01003504 MemTxResult r;
3505
3506 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3507 if (result) {
3508 *result = r;
3509 }
3510}
3511
3512void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3513{
3514 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003515}
3516
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003517/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003518static inline void address_space_stw_internal(AddressSpace *as,
3519 hwaddr addr, uint32_t val,
3520 MemTxAttrs attrs,
3521 MemTxResult *result,
3522 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003523{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003524 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003525 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003526 hwaddr l = 2;
3527 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003528 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003529 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003530
Paolo Bonzini41063e12015-03-18 14:21:43 +01003531 rcu_read_lock();
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003532 mr = address_space_translate(as, addr, &addr1, &l, true);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003533 if (l < 2 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003534 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003535
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003536#if defined(TARGET_WORDS_BIGENDIAN)
3537 if (endian == DEVICE_LITTLE_ENDIAN) {
3538 val = bswap16(val);
3539 }
3540#else
3541 if (endian == DEVICE_BIG_ENDIAN) {
3542 val = bswap16(val);
3543 }
3544#endif
Peter Maydell50013112015-04-26 16:49:24 +01003545 r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003546 } else {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003547 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003548 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
Gonglei3655cb92016-02-20 10:35:20 +08003549 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003550 switch (endian) {
3551 case DEVICE_LITTLE_ENDIAN:
3552 stw_le_p(ptr, val);
3553 break;
3554 case DEVICE_BIG_ENDIAN:
3555 stw_be_p(ptr, val);
3556 break;
3557 default:
3558 stw_p(ptr, val);
3559 break;
3560 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003561 invalidate_and_set_dirty(mr, addr1, 2);
Peter Maydell50013112015-04-26 16:49:24 +01003562 r = MEMTX_OK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003563 }
Peter Maydell50013112015-04-26 16:49:24 +01003564 if (result) {
3565 *result = r;
3566 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003567 if (release_lock) {
3568 qemu_mutex_unlock_iothread();
3569 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003570 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003571}
3572
3573void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
3574 MemTxAttrs attrs, MemTxResult *result)
3575{
3576 address_space_stw_internal(as, addr, val, attrs, result,
3577 DEVICE_NATIVE_ENDIAN);
3578}
3579
3580void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
3581 MemTxAttrs attrs, MemTxResult *result)
3582{
3583 address_space_stw_internal(as, addr, val, attrs, result,
3584 DEVICE_LITTLE_ENDIAN);
3585}
3586
3587void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
3588 MemTxAttrs attrs, MemTxResult *result)
3589{
3590 address_space_stw_internal(as, addr, val, attrs, result,
3591 DEVICE_BIG_ENDIAN);
bellardaab33092005-10-30 20:48:42 +00003592}
3593
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003594void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003595{
Peter Maydell50013112015-04-26 16:49:24 +01003596 address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003597}
3598
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003599void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003600{
Peter Maydell50013112015-04-26 16:49:24 +01003601 address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003602}
3603
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003604void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003605{
Peter Maydell50013112015-04-26 16:49:24 +01003606 address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003607}
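/*
 * Editor's illustrative sketch, not part of the original file: the
 * byte-order contract of the stw_le_p()/stw_be_p() helpers used in the
 * RAM fast path above (they come from qemu/bswap.h and store to host
 * memory).
 */
static void __attribute__((unused))
example_stw_byte_order(void)
{
    uint8_t buf[2];

    stw_le_p(buf, 0x1234);  /* buf[0] == 0x34, buf[1] == 0x12 */
    stw_be_p(buf, 0x1234);  /* buf[0] == 0x12, buf[1] == 0x34 */
}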
3608
bellardaab33092005-10-30 20:48:42 +00003609/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003610void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
3611 MemTxAttrs attrs, MemTxResult *result)
3612{
3613 MemTxResult r;
3614 val = tswap64(val);
3615 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3616 if (result) {
3617 *result = r;
3618 }
3619}
3620
3621void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
3622 MemTxAttrs attrs, MemTxResult *result)
3623{
3624 MemTxResult r;
3625 val = cpu_to_le64(val);
3626 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3627 if (result) {
3628 *result = r;
3629 }
 3630}

 3631void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
3632 MemTxAttrs attrs, MemTxResult *result)
3633{
3634 MemTxResult r;
3635 val = cpu_to_be64(val);
3636 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3637 if (result) {
3638 *result = r;
3639 }
3640}
3641
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003642void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00003643{
Peter Maydell50013112015-04-26 16:49:24 +01003644 address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003645}
3646
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003647void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003648{
Peter Maydell50013112015-04-26 16:49:24 +01003649 address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003650}
3651
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003652void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003653{
Peter Maydell50013112015-04-26 16:49:24 +01003654 address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003655}
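/*
 * Editor's illustrative sketch, not part of the original file: unlike
 * the stq_*_phys wrappers, the address_space_stq* variants can report
 * bus errors through MemTxResult. The guest address 0x2000 is
 * hypothetical.
 */
static void __attribute__((unused))
example_checked_stq(AddressSpace *as, uint64_t val)
{
    MemTxResult r;

    address_space_stq_le(as, 0x2000, val, MEMTXATTRS_UNSPECIFIED, &r);
    if (r != MEMTX_OK) {
        /* The transaction failed (e.g. unassigned memory or a device
         * error); a real caller would report this to the guest. */
    }
}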
3656
aliguori5e2972f2009-03-28 17:51:36 +00003657/* virtual memory access for debug (includes writing to ROM) */
Andreas Färberf17ec442013-06-29 19:40:58 +02003658int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00003659 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003660{
3661 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02003662 hwaddr phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00003663 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00003664
3665 while (len > 0) {
Peter Maydell5232e4c2016-01-21 14:15:06 +00003666 int asidx;
3667 MemTxAttrs attrs;
3668
bellard13eb76e2004-01-24 15:23:36 +00003669 page = addr & TARGET_PAGE_MASK;
Peter Maydell5232e4c2016-01-21 14:15:06 +00003670 phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
3671 asidx = cpu_asidx_from_attrs(cpu, attrs);
bellard13eb76e2004-01-24 15:23:36 +00003672 /* if no physical page mapped, return an error */
 3673        if (phys_addr == -1) {
 3674            return -1;
                 }
 3675        l = (page + TARGET_PAGE_SIZE) - addr;
 3676        if (l > len) {
 3677            l = len;
                 }
aliguori5e2972f2009-03-28 17:51:36 +00003678 phys_addr += (addr & ~TARGET_PAGE_MASK);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003679 if (is_write) {
Peter Maydell5232e4c2016-01-21 14:15:06 +00003680 cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
3681 phys_addr, buf, l);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003682 } else {
Peter Maydell5232e4c2016-01-21 14:15:06 +00003683 address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
3684 MEMTXATTRS_UNSPECIFIED,
Peter Maydell5c9eb022015-04-26 16:49:24 +01003685 buf, l, 0);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003686 }
bellard13eb76e2004-01-24 15:23:36 +00003687 len -= l;
3688 buf += l;
3689 addr += l;
3690 }
3691 return 0;
3692}
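/*
 * Editor's illustrative sketch, not part of the original file: a
 * debugger-style read through cpu_memory_rw_debug() above, e.g. what a
 * gdbstub-like caller might do. The CPU and guest virtual address are
 * hypothetical.
 */
static int __attribute__((unused))
example_debug_read(CPUState *cpu, target_ulong guest_va, uint8_t *buf,
                   int len)
{
    /* is_write == 0 requests a read; returns -1 if no physical page is
     * mapped at the given virtual address. */
    return cpu_memory_rw_debug(cpu, guest_va, buf, len, 0);
}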
Dr. David Alan Gilbert038629a2015-11-05 18:10:29 +00003693
3694/*
3695 * Allows code that needs to deal with migration bitmaps etc to still be built
 3696 * Allows code that needs to deal with migration bitmaps etc. to still be
 3697 * built target-independent.
3698size_t qemu_target_page_bits(void)
3699{
3700 return TARGET_PAGE_BITS;
3701}
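/*
 * Editor's illustrative sketch, not part of the original file:
 * target-independent code can reconstruct the page size and mask from
 * the bit count instead of using TARGET_PAGE_SIZE directly.
 */
static size_t __attribute__((unused))
example_target_page_size(void)
{
    return (size_t)1 << qemu_target_page_bits();
}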
3702
Paul Brooka68fe892010-03-01 00:08:59 +00003703#endif
bellard13eb76e2004-01-24 15:23:36 +00003704
Blue Swirl8e4a4242013-01-06 18:30:17 +00003705/*
 3706 * A helper function for the _utterly broken_ virtio device model to find out
 3707 * if it's running on a big-endian machine. Don't do this at home, kids!
3708 */
Greg Kurz98ed8ec2014-06-24 19:26:29 +02003709bool target_words_bigendian(void);
3710bool target_words_bigendian(void)
Blue Swirl8e4a4242013-01-06 18:30:17 +00003711{
3712#if defined(TARGET_WORDS_BIGENDIAN)
3713 return true;
3714#else
3715 return false;
3716#endif
3717}
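/*
 * Editor's illustrative sketch, not part of the original file: a trivial
 * consumer of the helper above; the real virtio code uses it to pick the
 * guest byte order for legacy devices.
 */
static const char * __attribute__((unused))
example_endianness_name(void)
{
    return target_words_bigendian() ? "big" : "little";
}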
3718
Wen Congyang76f35532012-05-07 12:04:18 +08003719#ifndef CONFIG_USER_ONLY
Avi Kivitya8170e52012-10-23 12:30:10 +02003720bool cpu_physical_memory_is_io(hwaddr phys_addr)
Wen Congyang76f35532012-05-07 12:04:18 +08003721{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003722 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003723 hwaddr l = 1;
Paolo Bonzini41063e12015-03-18 14:21:43 +01003724 bool res;
Wen Congyang76f35532012-05-07 12:04:18 +08003725
Paolo Bonzini41063e12015-03-18 14:21:43 +01003726 rcu_read_lock();
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003727 mr = address_space_translate(&address_space_memory,
3728 phys_addr, &phys_addr, &l, false);
Wen Congyang76f35532012-05-07 12:04:18 +08003729
Paolo Bonzini41063e12015-03-18 14:21:43 +01003730 res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
3731 rcu_read_unlock();
3732 return res;
Wen Congyang76f35532012-05-07 12:04:18 +08003733}
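/*
 * Editor's illustrative sketch, not part of the original file: a
 * crash-dump style caller can use the predicate above to skip device
 * regions, so that dumping guest memory never triggers MMIO side
 * effects.
 */
static bool __attribute__((unused))
example_safe_to_dump(hwaddr paddr)
{
    return !cpu_physical_memory_is_io(paddr);
}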
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003734
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003735int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003736{
3737 RAMBlock *block;
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003738 int ret = 0;
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003739
Mike Day0dc3f442013-09-05 14:41:35 -04003740 rcu_read_lock();
3741 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003742 ret = func(block->idstr, block->host, block->offset,
3743 block->used_length, opaque);
3744 if (ret) {
3745 break;
3746 }
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003747 }
Mike Day0dc3f442013-09-05 14:41:35 -04003748 rcu_read_unlock();
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003749 return ret;
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003750}
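/*
 * Editor's illustrative sketch, not part of the original file: a callback
 * matching the way qemu_ram_foreach_block() above invokes it (block name,
 * host pointer, offset, used length, opaque); the parameter types are
 * inferred from that call site. Returning non-zero stops the walk.
 */
static int __attribute__((unused))
example_ram_block_cb(const char *idstr, uint8_t *host, ram_addr_t offset,
                     ram_addr_t length, void *opaque)
{
    fprintf(stderr, "ram block %s: offset 0x%" PRIx64 ", length 0x%" PRIx64 "\n",
            idstr, (uint64_t)offset, (uint64_t)length);
    return 0;   /* keep iterating */
}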
Peter Maydellec3f8c92013-06-27 20:53:38 +01003751#endif