/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#ifndef _WIN32
#include <sys/mman.h>
#endif

#include "qemu/cutils.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "hw/qdev.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "exec/log.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables. */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
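/* Example: with ADDR_SPACE_BITS == 64, P_L2_BITS == 9 and 4 KiB target pages
 * (TARGET_PAGE_BITS == 12), P_L2_LEVELS is ((64 - 12 - 1) / 9) + 1 == 6,
 * i.e. six radix-tree levels sit above the page offset. */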

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3
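/* Reserved section indices: leaf PhysPageEntry.ptr values and the low bits of
 * the iotlb values built in memory_region_section_get_iotlb() below use these
 * slots for the unassigned, not-dirty, ROM and watchpoint cases. */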

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

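/* Map [index, index + nb) pages of the physical address space to 'leaf'.
 * phys_page_set_level() above descends from the top level, allocating
 * intermediate nodes on demand and stamping 'leaf' into every entry whose
 * step-aligned range is fully covered; partially covered entries recurse
 * one level down. */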
static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return section->size.hi ||
           range_covers_byte(section->offset_within_address_space,
                             section->size.lo, addr);
}

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = atomic_read(&d->mru_section);
    subpage_t *subpage;
    bool update;

    if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
        section_covers_addr(section, addr)) {
        update = false;
    } else {
        section = phys_page_find(d->phys_map, addr, d->map.nodes,
                                 d->map.sections);
        update = true;
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    if (update) {
        atomic_set(&d->mru_section, section);
    }
    return section;
}
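/* mru_section above is a one-entry cache in front of the phys_page_find()
 * radix-tree walk; it is read and updated with atomic_* accesses because
 * lookups run concurrently under RCU without taking a lock. */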

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions. When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly. For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine. If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}
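/* The loop above resolves a possibly nested chain of IOMMUs: each
 * iommu_ops->translate() hop rewrites the address, clamps *plen to the
 * translated page, and may redirect the walk to a different target
 * AddressSpace until a terminal (non-IOMMU) MemoryRegion is reached. */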

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
#endif

#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

static int cpu_get_free_index(Error **errp)
{
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
                   MAX_CPUMASK_BITS);
        return -1;
    }

    bitmap_set(cpu_index_map, cpu, 1);
    return cpu;
}

void cpu_exec_exit(CPUState *cpu)
{
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
        return;
    }

    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
    cpu->cpu_index = -1;
}
#else

static int cpu_get_free_index(Error **errp)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

void cpu_exec_exit(CPUState *cpu)
{
}
#endif

void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int cpu_index;
    Error *local_err = NULL;

    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();

    /* This is a softmmu CPU object, so create a property for it
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
                             (Object **)&cpu->memory,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));
#endif

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
#if defined(CONFIG_USER_ONLY)
        cpu_list_unlock();
#endif
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    MemTxAttrs attrs;
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)

{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *     rcu_read_lock()
     *     read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *     rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    bool dirty = false;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
                                              offset, num);
        page += num;
    }

    rcu_read_unlock();

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}
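/* The per-client dirty bitmap is sharded into DIRTY_MEMORY_BLOCK_SIZE-bit
 * blocks; the loop above clears at most one block per iteration with
 * bitmap_test_and_clear_atomic() while holding only rcu_read_lock().  When
 * anything was dirty under TCG, the softmmu TLBs are reset afterwards so
 * that subsequent guest writes mark the pages dirty again. */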

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

1087/*
1088 * Set a custom physical guest memory alloator.
1089 * Accelerators with unusual needs may need this. Hopefully, we can
1090 * get rid of it eventually.
1091 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
                  & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}


static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
1199 remain.size = int128_sub(remain.size, now.size);
1200 remain.offset_within_address_space += int128_get64(now.size);
1201 remain.offset_within_region += int128_get64(now.size);
Tyler Hall69b67642012-07-25 18:45:04 -04001202 now = remain;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001203 if (int128_lt(remain.size, page_size)) {
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001204 register_subpage(d, &now);
Hu Tao88266242013-08-29 18:21:16 +08001205 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001206 now.size = page_size;
Avi Kivityac1970f2012-10-03 16:22:53 +02001207 register_subpage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001208 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001209 now.size = int128_and(now.size, int128_neg(page_size));
Avi Kivityac1970f2012-10-03 16:22:53 +02001210 register_multipage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001211 }
Avi Kivity0f0cb162012-02-13 17:14:32 +02001212 }
1213}
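/*
 * Worked example (illustrative, assuming 4 KiB target pages): a section
 * covering [0xfff800, 0x2000400) is split by the loop above into
 *   - a subpage piece for 0xfff800..0xffffff (unaligned head),
 *   - one multipage registration for 0x1000000..0x1ffffff, and
 *   - a subpage piece for 0x2000000..0x20003ff (short tail),
 * so only the partially covered pages pay the subpage indirection.
 */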
1214
Sheng Yang62a27442010-01-26 19:21:16 +08001215void qemu_flush_coalesced_mmio_buffer(void)
1216{
1217 if (kvm_enabled())
1218 kvm_flush_coalesced_mmio_buffer();
1219}
1220
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001221void qemu_mutex_lock_ramlist(void)
1222{
1223 qemu_mutex_lock(&ram_list.mutex);
1224}
1225
1226void qemu_mutex_unlock_ramlist(void)
1227{
1228 qemu_mutex_unlock(&ram_list.mutex);
1229}
1230
Markus Armbrustere1e84ba2013-07-31 15:11:10 +02001231#ifdef __linux__
Alex Williamson04b16652010-07-02 11:13:17 -06001232static void *file_ram_alloc(RAMBlock *block,
1233 ram_addr_t memory,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001234 const char *path,
1235 Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001236{
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001237 bool unlink_on_error = false;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001238 char *filename;
Peter Feiner8ca761f2013-03-04 13:54:25 -05001239 char *sanitized_name;
1240 char *c;
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +03001241 void *area;
Paolo Bonzini5c3ece72016-03-17 15:53:13 +01001242 int fd = -1;
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001243 int64_t page_size;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001244
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001245 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1246 error_setg(errp,
1247 "host lacks kvm mmu notifiers, -mem-path unsupported");
1248 return NULL;
1249 }
1250
1251 for (;;) {
1252 fd = open(path, O_RDWR);
1253 if (fd >= 0) {
1254 /* @path names an existing file, use it */
1255 break;
1256 }
1257 if (errno == ENOENT) {
1258 /* @path names a file that doesn't exist, create it */
1259 fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
1260 if (fd >= 0) {
1261 unlink_on_error = true;
1262 break;
1263 }
1264 } else if (errno == EISDIR) {
1265 /* @path names a directory, create a file there */
1266 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1267 sanitized_name = g_strdup(memory_region_name(block->mr));
1268 for (c = sanitized_name; *c != '\0'; c++) {
1269 if (*c == '/') {
1270 *c = '_';
1271 }
1272 }
1273
1274 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1275 sanitized_name);
1276 g_free(sanitized_name);
1277
1278 fd = mkstemp(filename);
1279 if (fd >= 0) {
1280 unlink(filename);
1281 g_free(filename);
1282 break;
1283 }
1284 g_free(filename);
1285 }
1286 if (errno != EEXIST && errno != EINTR) {
1287 error_setg_errno(errp, errno,
1288 "can't open backing store %s for guest RAM",
1289 path);
1290 goto error;
1291 }
1292 /*
1293 * Try again on EINTR and EEXIST. The latter happens when
1294 * something else creates the file between our two open().
1295 */
1296 }
1297
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001298 page_size = qemu_fd_getpagesize(fd);
1299 block->mr->align = page_size;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001300
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001301 if (memory < page_size) {
Hu Tao557529d2014-09-09 13:28:00 +08001302 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001303 "or larger than page size 0x%" PRIx64,
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001304 memory, page_size);
Hu Tao557529d2014-09-09 13:28:00 +08001305 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001306 }
1307
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001308 memory = ROUND_UP(memory, page_size);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001309
1310 /*
1311 * ftruncate is not supported by hugetlbfs in older
1312 * hosts, so don't bother bailing out on errors.
1313 * If anything goes wrong with it under other filesystems,
1314 * mmap will fail.
1315 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001316 if (ftruncate(fd, memory)) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001317 perror("ftruncate");
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001318 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001319
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001320 area = qemu_ram_mmap(fd, memory, page_size, block->flags & RAM_SHARED);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001321 if (area == MAP_FAILED) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001322 error_setg_errno(errp, errno,
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001323 "unable to map backing store for guest RAM");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001324 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001325 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001326
1327 if (mem_prealloc) {
Paolo Bonzini38183312014-05-14 17:43:21 +08001328 os_mem_prealloc(fd, area, memory);
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001329 }
1330
Alex Williamson04b16652010-07-02 11:13:17 -06001331 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001332 return area;
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001333
1334error:
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001335 if (unlink_on_error) {
1336 unlink(path);
1337 }
Paolo Bonzini5c3ece72016-03-17 15:53:13 +01001338 if (fd != -1) {
1339 close(fd);
1340 }
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001341 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001342}
1343#endif
1344
Mike Day0dc3f442013-09-05 14:41:35 -04001345/* Called with the ramlist lock held. */
Alex Williamsond17b5282010-06-25 11:08:38 -06001346static ram_addr_t find_ram_offset(ram_addr_t size)
1347{
Alex Williamson04b16652010-07-02 11:13:17 -06001348 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001349 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001350
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001351 assert(size != 0); /* it would hand out the same offset multiple times */
1352
Mike Day0dc3f442013-09-05 14:41:35 -04001353 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
Alex Williamson04b16652010-07-02 11:13:17 -06001354 return 0;
Mike Day0d53d9f2015-01-21 13:45:24 +01001355 }
Alex Williamson04b16652010-07-02 11:13:17 -06001356
Mike Day0dc3f442013-09-05 14:41:35 -04001357 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001358 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001359
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001360 end = block->offset + block->max_length;
Alex Williamson04b16652010-07-02 11:13:17 -06001361
Mike Day0dc3f442013-09-05 14:41:35 -04001362 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001363 if (next_block->offset >= end) {
1364 next = MIN(next, next_block->offset);
1365 }
1366 }
1367 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001368 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001369 mingap = next - end;
1370 }
1371 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001372
1373 if (offset == RAM_ADDR_MAX) {
1374 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1375 (uint64_t)size);
1376 abort();
1377 }
1378
Alex Williamson04b16652010-07-02 11:13:17 -06001379 return offset;
1380}
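/*
 * Illustrative example (made-up numbers): with existing blocks at
 * [0x0, 0x8000000) and [0x10000000, 0x18000000), a request for
 * 0x4000000 bytes sees two candidate gaps; the loop above keeps the
 * smallest gap that still fits, so the new block is placed at
 * 0x8000000 rather than after the last block.
 */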
1381
Juan Quintela652d7ec2012-07-20 10:37:54 +02001382ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001383{
Alex Williamsond17b5282010-06-25 11:08:38 -06001384 RAMBlock *block;
1385 ram_addr_t last = 0;
1386
Mike Day0dc3f442013-09-05 14:41:35 -04001387 rcu_read_lock();
1388 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001389 last = MAX(last, block->offset + block->max_length);
Mike Day0d53d9f2015-01-21 13:45:24 +01001390 }
Mike Day0dc3f442013-09-05 14:41:35 -04001391 rcu_read_unlock();
Alex Williamsond17b5282010-06-25 11:08:38 -06001392 return last;
1393}
1394
Jason Baronddb97f12012-08-02 15:44:16 -04001395static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1396{
1397 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001398
1399 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
Marcel Apfelbaum47c8ca52015-02-04 17:43:54 +02001400 if (!machine_dump_guest_core(current_machine)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001401 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1402 if (ret) {
1403 perror("qemu_madvise");
1404 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1405 "but dump_guest_core=off specified\n");
1406 }
1407 }
1408}
1409
Mike Day0dc3f442013-09-05 14:41:35 -04001410/* Called within an RCU critical section, or while the ramlist lock
1411 * is held.
1412 */
Hu Tao20cfe882014-04-02 15:13:26 +08001413static RAMBlock *find_ram_block(ram_addr_t addr)
Cam Macdonell84b89d72010-07-26 18:10:57 -06001414{
Hu Tao20cfe882014-04-02 15:13:26 +08001415 RAMBlock *block;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001416
Mike Day0dc3f442013-09-05 14:41:35 -04001417 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001418 if (block->offset == addr) {
Hu Tao20cfe882014-04-02 15:13:26 +08001419 return block;
Avi Kivityc5705a72011-12-20 15:59:12 +02001420 }
1421 }
Hu Tao20cfe882014-04-02 15:13:26 +08001422
1423 return NULL;
1424}
1425
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001426const char *qemu_ram_get_idstr(RAMBlock *rb)
1427{
1428 return rb->idstr;
1429}
1430
Mike Dayae3a7042013-09-05 14:41:35 -04001431/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001432void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1433{
Mike Dayae3a7042013-09-05 14:41:35 -04001434 RAMBlock *new_block, *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001435
Mike Day0dc3f442013-09-05 14:41:35 -04001436 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001437 new_block = find_ram_block(addr);
Avi Kivityc5705a72011-12-20 15:59:12 +02001438 assert(new_block);
1439 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001440
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001441 if (dev) {
1442 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001443 if (id) {
1444 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001445 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001446 }
1447 }
1448 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1449
Mike Day0dc3f442013-09-05 14:41:35 -04001450 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001451 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001452 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1453 new_block->idstr);
1454 abort();
1455 }
1456 }
Mike Day0dc3f442013-09-05 14:41:35 -04001457 rcu_read_unlock();
Avi Kivityc5705a72011-12-20 15:59:12 +02001458}
1459
Mike Dayae3a7042013-09-05 14:41:35 -04001460/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001461void qemu_ram_unset_idstr(ram_addr_t addr)
1462{
Mike Dayae3a7042013-09-05 14:41:35 -04001463 RAMBlock *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001464
Mike Dayae3a7042013-09-05 14:41:35 -04001465 /* FIXME: arch_init.c assumes that this is not called throughout
1466 * migration. Ignore the problem since hot-unplug during migration
1467 * does not work anyway.
1468 */
1469
Mike Day0dc3f442013-09-05 14:41:35 -04001470 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001471 block = find_ram_block(addr);
Hu Tao20cfe882014-04-02 15:13:26 +08001472 if (block) {
1473 memset(block->idstr, 0, sizeof(block->idstr));
1474 }
Mike Day0dc3f442013-09-05 14:41:35 -04001475 rcu_read_unlock();
Hu Tao20cfe882014-04-02 15:13:26 +08001476}
1477
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001478static int memory_try_enable_merging(void *addr, size_t len)
1479{
Marcel Apfelbaum75cc7f02015-02-04 17:43:55 +02001480 if (!machine_mem_merge(current_machine)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001481 /* disabled by the user */
1482 return 0;
1483 }
1484
1485 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1486}
1487
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001488/* Only legal before the guest might have detected the memory size: e.g. on
1489 * incoming migration, or right after reset.
1490 *
1491 * As the memory core doesn't know how memory is accessed, it is up to
1492 * the resize callback to update device state and/or add assertions to detect
1493 * misuse, if necessary.
1494 */
1495int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
1496{
1497 RAMBlock *block = find_ram_block(base);
1498
1499 assert(block);
1500
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001501 newsize = HOST_PAGE_ALIGN(newsize);
Michael S. Tsirkin129ddaf2015-02-17 10:15:30 +01001502
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001503 if (block->used_length == newsize) {
1504 return 0;
1505 }
1506
1507 if (!(block->flags & RAM_RESIZEABLE)) {
1508 error_setg_errno(errp, EINVAL,
1509 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1510 " in != 0x" RAM_ADDR_FMT, block->idstr,
1511 newsize, block->used_length);
1512 return -EINVAL;
1513 }
1514
1515 if (block->max_length < newsize) {
1516 error_setg_errno(errp, EINVAL,
1517 "Length too large: %s: 0x" RAM_ADDR_FMT
1518 " > 0x" RAM_ADDR_FMT, block->idstr,
1519 newsize, block->max_length);
1520 return -EINVAL;
1521 }
1522
1523 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1524 block->used_length = newsize;
Paolo Bonzini58d27072015-03-23 11:56:01 +01001525 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1526 DIRTY_CLIENTS_ALL);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001527 memory_region_set_size(block->mr, newsize);
1528 if (block->resized) {
1529 block->resized(block->idstr, newsize, block->host);
1530 }
1531 return 0;
1532}
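/*
 * Usage sketch (illustrative): a device owning a resizeable block, for
 * example one whose real size is only learned on incoming migration,
 * would typically do
 *
 *     if (qemu_ram_resize(block_offset, new_size, &err) < 0) {
 *         ... report err ...
 *     }
 *
 * before the guest can observe the memory; "block_offset", "new_size"
 * and "err" are placeholder names.
 */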
1533
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001534/* Called with ram_list.mutex held */
1535static void dirty_memory_extend(ram_addr_t old_ram_size,
1536 ram_addr_t new_ram_size)
1537{
1538 ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
1539 DIRTY_MEMORY_BLOCK_SIZE);
1540 ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
1541 DIRTY_MEMORY_BLOCK_SIZE);
1542 int i;
1543
1544 /* Only need to extend if block count increased */
1545 if (new_num_blocks <= old_num_blocks) {
1546 return;
1547 }
1548
1549 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1550 DirtyMemoryBlocks *old_blocks;
1551 DirtyMemoryBlocks *new_blocks;
1552 int j;
1553
1554 old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
1555 new_blocks = g_malloc(sizeof(*new_blocks) +
1556 sizeof(new_blocks->blocks[0]) * new_num_blocks);
1557
1558 if (old_num_blocks) {
1559 memcpy(new_blocks->blocks, old_blocks->blocks,
1560 old_num_blocks * sizeof(old_blocks->blocks[0]));
1561 }
1562
1563 for (j = old_num_blocks; j < new_num_blocks; j++) {
1564 new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
1565 }
1566
1567 atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
1568
1569 if (old_blocks) {
1570 g_free_rcu(old_blocks, rcu);
1571 }
1572 }
1573}
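/*
 * Illustrative note: with DIRTY_MEMORY_BLOCK_SIZE pages per block,
 * growing RAM from 3.5 to 4.5 blocks' worth of pages makes
 * old_num_blocks 4 and new_num_blocks 5 (DIV_ROUND_UP), so for each
 * dirty-memory client the four existing bitmap pointers are copied and
 * exactly one fresh bitmap is allocated; the old container is freed
 * through RCU once readers are done with it.
 */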
1574
Fam Zheng528f46a2016-03-01 14:18:18 +08001575static void ram_block_add(RAMBlock *new_block, Error **errp)
Avi Kivityc5705a72011-12-20 15:59:12 +02001576{
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001577 RAMBlock *block;
Mike Day0d53d9f2015-01-21 13:45:24 +01001578 RAMBlock *last_block = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001579 ram_addr_t old_ram_size, new_ram_size;
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001580 Error *err = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001581
1582 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001583
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001584 qemu_mutex_lock_ramlist();
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001585 new_block->offset = find_ram_offset(new_block->max_length);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001586
1587 if (!new_block->host) {
1588 if (xen_enabled()) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001589 xen_ram_alloc(new_block->offset, new_block->max_length,
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001590 new_block->mr, &err);
1591 if (err) {
1592 error_propagate(errp, err);
1593 qemu_mutex_unlock_ramlist();
Paolo Bonzini39c350e2016-03-09 18:14:01 +01001594 return;
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001595 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001596 } else {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001597 new_block->host = phys_mem_alloc(new_block->max_length,
Igor Mammedova2b257d2014-10-31 16:38:37 +00001598 &new_block->mr->align);
Markus Armbruster39228252013-07-31 15:11:11 +02001599 if (!new_block->host) {
Hu Taoef701d72014-09-09 13:27:54 +08001600 error_setg_errno(errp, errno,
1601 "cannot set up guest memory '%s'",
1602 memory_region_name(new_block->mr));
1603 qemu_mutex_unlock_ramlist();
Paolo Bonzini39c350e2016-03-09 18:14:01 +01001604 return;
Markus Armbruster39228252013-07-31 15:11:11 +02001605 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001606 memory_try_enable_merging(new_block->host, new_block->max_length);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001607 }
1608 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001609
Li Zhijiandd631692015-07-02 20:18:06 +08001610 new_ram_size = MAX(old_ram_size,
1611 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1612 if (new_ram_size > old_ram_size) {
1613 migration_bitmap_extend(old_ram_size, new_ram_size);
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001614 dirty_memory_extend(old_ram_size, new_ram_size);
Li Zhijiandd631692015-07-02 20:18:06 +08001615 }
Mike Day0d53d9f2015-01-21 13:45:24 +01001616 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1617 * QLIST (which has an RCU-friendly variant) does not have insertion at
1618 * tail, so save the last element in last_block.
1619 */
Mike Day0dc3f442013-09-05 14:41:35 -04001620 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Mike Day0d53d9f2015-01-21 13:45:24 +01001621 last_block = block;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001622 if (block->max_length < new_block->max_length) {
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001623 break;
1624 }
1625 }
1626 if (block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001627 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001628 } else if (last_block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001629 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001630 } else { /* list is empty */
Mike Day0dc3f442013-09-05 14:41:35 -04001631 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001632 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001633 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001634
Mike Day0dc3f442013-09-05 14:41:35 -04001635 /* Write list before version */
1636 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001637 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001638 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001639
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001640 cpu_physical_memory_set_dirty_range(new_block->offset,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001641 new_block->used_length,
1642 DIRTY_CLIENTS_ALL);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001643
Paolo Bonzinia904c912015-01-21 16:18:35 +01001644 if (new_block->host) {
1645 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1646 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1647 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1648 if (kvm_enabled()) {
1649 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1650 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001651 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001652}
1653
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001654#ifdef __linux__
Fam Zheng528f46a2016-03-01 14:18:18 +08001655RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
1656 bool share, const char *mem_path,
1657 Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001658{
1659 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001660 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001661
1662 if (xen_enabled()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001663 error_setg(errp, "-mem-path not supported with Xen");
Fam Zheng528f46a2016-03-01 14:18:18 +08001664 return NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001665 }
1666
1667 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1668 /*
1669 * file_ram_alloc() needs to allocate just like
1670 * phys_mem_alloc, but we haven't bothered to provide
1671 * a hook there.
1672 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001673 error_setg(errp,
1674 "-mem-path not supported with this accelerator");
Fam Zheng528f46a2016-03-01 14:18:18 +08001675 return NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001676 }
1677
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001678 size = HOST_PAGE_ALIGN(size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001679 new_block = g_malloc0(sizeof(*new_block));
1680 new_block->mr = mr;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001681 new_block->used_length = size;
1682 new_block->max_length = size;
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001683 new_block->flags = share ? RAM_SHARED : 0;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001684 new_block->host = file_ram_alloc(new_block, size,
1685 mem_path, errp);
1686 if (!new_block->host) {
1687 g_free(new_block);
Fam Zheng528f46a2016-03-01 14:18:18 +08001688 return NULL;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001689 }
1690
Fam Zheng528f46a2016-03-01 14:18:18 +08001691 ram_block_add(new_block, &local_err);
Hu Taoef701d72014-09-09 13:27:54 +08001692 if (local_err) {
1693 g_free(new_block);
1694 error_propagate(errp, local_err);
Fam Zheng528f46a2016-03-01 14:18:18 +08001695 return NULL;
Hu Taoef701d72014-09-09 13:27:54 +08001696 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001697 return new_block;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001698}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001699#endif
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001700
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001701static
Fam Zheng528f46a2016-03-01 14:18:18 +08001702RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1703 void (*resized)(const char*,
1704 uint64_t length,
1705 void *host),
1706 void *host, bool resizeable,
1707 MemoryRegion *mr, Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001708{
1709 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001710 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001711
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001712 size = HOST_PAGE_ALIGN(size);
1713 max_size = HOST_PAGE_ALIGN(max_size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001714 new_block = g_malloc0(sizeof(*new_block));
1715 new_block->mr = mr;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001716 new_block->resized = resized;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001717 new_block->used_length = size;
1718 new_block->max_length = max_size;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001719 assert(max_size >= size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001720 new_block->fd = -1;
1721 new_block->host = host;
1722 if (host) {
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001723 new_block->flags |= RAM_PREALLOC;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001724 }
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001725 if (resizeable) {
1726 new_block->flags |= RAM_RESIZEABLE;
1727 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001728 ram_block_add(new_block, &local_err);
Hu Taoef701d72014-09-09 13:27:54 +08001729 if (local_err) {
1730 g_free(new_block);
1731 error_propagate(errp, local_err);
Fam Zheng528f46a2016-03-01 14:18:18 +08001732 return NULL;
Hu Taoef701d72014-09-09 13:27:54 +08001733 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001734 return new_block;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001735}
1736
Fam Zheng528f46a2016-03-01 14:18:18 +08001737RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001738 MemoryRegion *mr, Error **errp)
1739{
1740 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1741}
1742
Fam Zheng528f46a2016-03-01 14:18:18 +08001743RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
pbrook94a6b542009-04-11 17:15:54 +00001744{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001745 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1746}
1747
Fam Zheng528f46a2016-03-01 14:18:18 +08001748RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001749 void (*resized)(const char*,
1750 uint64_t length,
1751 void *host),
1752 MemoryRegion *mr, Error **errp)
1753{
1754 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
pbrook94a6b542009-04-11 17:15:54 +00001755}
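/*
 * Usage sketch (illustrative): board and device code normally reaches
 * these helpers through the memory_region_init_ram*() wrappers rather
 * than calling them directly; for instance memory_region_init_ram()
 * ends up in qemu_ram_alloc(), memory_region_init_ram_ptr() in
 * qemu_ram_alloc_from_ptr(), and memory_region_init_resizeable_ram()
 * in qemu_ram_alloc_resizeable().
 */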
bellarde9a1ab12007-02-08 23:08:38 +00001756
Paolo Bonzini43771532013-09-09 17:58:40 +02001757static void reclaim_ramblock(RAMBlock *block)
1758{
1759 if (block->flags & RAM_PREALLOC) {
1760 ;
1761 } else if (xen_enabled()) {
1762 xen_invalidate_map_cache_entry(block->host);
1763#ifndef _WIN32
1764 } else if (block->fd >= 0) {
Eduardo Habkost2f3a2bb2015-11-06 20:11:21 -02001765 qemu_ram_munmap(block->host, block->max_length);
Paolo Bonzini43771532013-09-09 17:58:40 +02001766 close(block->fd);
1767#endif
1768 } else {
1769 qemu_anon_ram_free(block->host, block->max_length);
1770 }
1771 g_free(block);
1772}
1773
Fam Zhengf1060c52016-03-01 14:18:22 +08001774void qemu_ram_free(RAMBlock *block)
bellarde9a1ab12007-02-08 23:08:38 +00001775{
Marc-André Lureau85bc2a12016-03-29 13:20:51 +02001776 if (!block) {
1777 return;
1778 }
1779
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001780 qemu_mutex_lock_ramlist();
Fam Zhengf1060c52016-03-01 14:18:22 +08001781 QLIST_REMOVE_RCU(block, next);
1782 ram_list.mru_block = NULL;
1783 /* Write list before version */
1784 smp_wmb();
1785 ram_list.version++;
1786 call_rcu(block, reclaim_ramblock, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001787 qemu_mutex_unlock_ramlist();
bellarde9a1ab12007-02-08 23:08:38 +00001788}
1789
Huang Yingcd19cfa2011-03-02 08:56:19 +01001790#ifndef _WIN32
1791void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1792{
1793 RAMBlock *block;
1794 ram_addr_t offset;
1795 int flags;
1796 void *area, *vaddr;
1797
Mike Day0dc3f442013-09-05 14:41:35 -04001798 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001799 offset = addr - block->offset;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001800 if (offset < block->max_length) {
Michael S. Tsirkin1240be22014-11-12 11:44:41 +02001801 vaddr = ramblock_ptr(block, offset);
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001802 if (block->flags & RAM_PREALLOC) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001803 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001804 } else if (xen_enabled()) {
1805 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001806 } else {
1807 flags = MAP_FIXED;
Markus Armbruster3435f392013-07-31 15:11:07 +02001808 if (block->fd >= 0) {
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001809 flags |= (block->flags & RAM_SHARED ?
1810 MAP_SHARED : MAP_PRIVATE);
Markus Armbruster3435f392013-07-31 15:11:07 +02001811 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1812 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001813 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001814 /*
1815 * Remap needs to match alloc. Accelerators that
1816 * set phys_mem_alloc never remap. If they did,
1817 * we'd need a remap hook here.
1818 */
1819 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1820
Huang Yingcd19cfa2011-03-02 08:56:19 +01001821 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1822 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1823 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001824 }
1825 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001826 fprintf(stderr, "Could not remap addr: "
1827 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001828 length, addr);
1829 exit(1);
1830 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001831 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001832 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001833 }
Huang Yingcd19cfa2011-03-02 08:56:19 +01001834 }
1835 }
1836}
1837#endif /* !_WIN32 */
1838
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001839int qemu_get_ram_fd(ram_addr_t addr)
1840{
Mike Dayae3a7042013-09-05 14:41:35 -04001841 RAMBlock *block;
1842 int fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001843
Mike Day0dc3f442013-09-05 14:41:35 -04001844 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001845 block = qemu_get_ram_block(addr);
1846 fd = block->fd;
Mike Day0dc3f442013-09-05 14:41:35 -04001847 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001848 return fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001849}
1850
Tetsuya Mukawa56a571d2015-12-21 12:47:34 +09001851void qemu_set_ram_fd(ram_addr_t addr, int fd)
1852{
1853 RAMBlock *block;
1854
1855 rcu_read_lock();
1856 block = qemu_get_ram_block(addr);
1857 block->fd = fd;
1858 rcu_read_unlock();
1859}
1860
Damjan Marion3fd74b82014-06-26 23:01:32 +02001861void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1862{
Mike Dayae3a7042013-09-05 14:41:35 -04001863 RAMBlock *block;
1864 void *ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001865
Mike Day0dc3f442013-09-05 14:41:35 -04001866 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001867 block = qemu_get_ram_block(addr);
1868 ptr = ramblock_ptr(block, 0);
Mike Day0dc3f442013-09-05 14:41:35 -04001869 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001870 return ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001871}
1872
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001873/* Return a host pointer to ram allocated with qemu_ram_alloc.
Mike Dayae3a7042013-09-05 14:41:35 -04001874 * This should not be used for general purpose DMA. Use address_space_map
1875 * or address_space_rw instead. For local memory (e.g. video ram) that the
1876 * device owns, use memory_region_get_ram_ptr.
Mike Day0dc3f442013-09-05 14:41:35 -04001877 *
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001878 * Called within RCU critical section.
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001879 */
Gonglei3655cb92016-02-20 10:35:20 +08001880void *qemu_get_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001881{
Gonglei3655cb92016-02-20 10:35:20 +08001882 RAMBlock *block = ram_block;
1883
1884 if (block == NULL) {
1885 block = qemu_get_ram_block(addr);
1886 }
Mike Dayae3a7042013-09-05 14:41:35 -04001887
1888 if (xen_enabled() && block->host == NULL) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001889 /* We need to check if the requested address is in the RAM
1890 * because we don't want to map the entire memory in QEMU.
1891 * In that case just map until the end of the page.
1892 */
1893 if (block->offset == 0) {
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001894 return xen_map_cache(addr, 0, 0);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001895 }
Mike Dayae3a7042013-09-05 14:41:35 -04001896
1897 block->host = xen_map_cache(block->offset, block->max_length, 1);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001898 }
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001899 return ramblock_ptr(block, addr - block->offset);
pbrookdc828ca2009-04-09 22:21:07 +00001900}
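/*
 * Illustrative contrast: general-purpose DMA should go through the
 * address_space_* API, which handles MMIO and bounds checking, e.g.
 *
 *     address_space_rw(&address_space_memory, gpa, attrs, buf, len, is_write);
 *
 * whereas qemu_get_ram_ptr() is only for memory the caller already
 * knows is plain RAM ("gpa", "buf" and "len" are placeholder names).
 */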
1901
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001902/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
Mike Dayae3a7042013-09-05 14:41:35 -04001903 * but takes a size argument.
Mike Day0dc3f442013-09-05 14:41:35 -04001904 *
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001905 * Called within RCU critical section.
Mike Dayae3a7042013-09-05 14:41:35 -04001906 */
Gonglei3655cb92016-02-20 10:35:20 +08001907static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
1908 hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001909{
Gonglei3655cb92016-02-20 10:35:20 +08001910 RAMBlock *block = ram_block;
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001911 ram_addr_t offset_inside_block;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001912 if (*size == 0) {
1913 return NULL;
1914 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001915
Gonglei3655cb92016-02-20 10:35:20 +08001916 if (block == NULL) {
1917 block = qemu_get_ram_block(addr);
1918 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001919 offset_inside_block = addr - block->offset;
1920 *size = MIN(*size, block->max_length - offset_inside_block);
1921
1922 if (xen_enabled() && block->host == NULL) {
1923 /* We need to check if the requested address is in the RAM
1924 * because we don't want to map the entire memory in QEMU.
1925 * In that case just map the requested area.
1926 */
1927 if (block->offset == 0) {
1928 return xen_map_cache(addr, *size, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001929 }
1930
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001931 block->host = xen_map_cache(block->offset, block->max_length, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001932 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001933
1934 return ramblock_ptr(block, offset_inside_block);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001935}
1936
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001937/*
1938 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1939 * in that RAMBlock.
1940 *
1941 * ptr: Host pointer to look up
1942 * round_offset: If true round the result offset down to a page boundary
1943 * *ram_addr: set to result ram_addr
1944 * *offset: set to result offset within the RAMBlock
1945 *
1946 * Returns: RAMBlock (or NULL if not found)
Mike Dayae3a7042013-09-05 14:41:35 -04001947 *
1948 * By the time this function returns, the returned pointer is not protected
1949 * by RCU anymore. If the caller is not within an RCU critical section and
1950 * does not hold the iothread lock, it must have other means of protecting the
1951 * pointer, such as a reference to the region that includes the incoming
1952 * ram_addr_t.
1953 */
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001954RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
1955 ram_addr_t *ram_addr,
1956 ram_addr_t *offset)
pbrook5579c7f2009-04-11 14:47:08 +00001957{
pbrook94a6b542009-04-11 17:15:54 +00001958 RAMBlock *block;
1959 uint8_t *host = ptr;
1960
Jan Kiszka868bb332011-06-21 22:59:09 +02001961 if (xen_enabled()) {
Mike Day0dc3f442013-09-05 14:41:35 -04001962 rcu_read_lock();
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001963 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001964 block = qemu_get_ram_block(*ram_addr);
1965 if (block) {
1966 *offset = (host - block->host);
1967 }
Mike Day0dc3f442013-09-05 14:41:35 -04001968 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001969 return block;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001970 }
1971
Mike Day0dc3f442013-09-05 14:41:35 -04001972 rcu_read_lock();
1973 block = atomic_rcu_read(&ram_list.mru_block);
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001974 if (block && block->host && host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001975 goto found;
1976 }
1977
Mike Day0dc3f442013-09-05 14:41:35 -04001978 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001979 /* This case happens when the block is not mapped. */
1980 if (block->host == NULL) {
1981 continue;
1982 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001983 if (host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001984 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001985 }
pbrook94a6b542009-04-11 17:15:54 +00001986 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001987
Mike Day0dc3f442013-09-05 14:41:35 -04001988 rcu_read_unlock();
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001989 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001990
1991found:
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001992 *offset = (host - block->host);
1993 if (round_offset) {
1994 *offset &= TARGET_PAGE_MASK;
1995 }
1996 *ram_addr = block->offset + *offset;
Mike Day0dc3f442013-09-05 14:41:35 -04001997 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001998 return block;
1999}
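/*
 * Usage sketch (illustrative): callers such as the migration code turn a
 * host pointer back into (block, ram_addr, offset) like this:
 *
 *     ram_addr_t ram_addr, offset;
 *     RAMBlock *rb = qemu_ram_block_from_host(host_ptr, true,
 *                                             &ram_addr, &offset);
 *     if (rb) {
 *         ... offset is page aligned because round_offset was true ...
 *     }
 *
 * "host_ptr" is a placeholder name.
 */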
2000
Dr. David Alan Gilberte3dd7492015-11-05 18:10:33 +00002001/*
2002 * Finds the named RAMBlock
2003 *
2004 * name: The name of RAMBlock to find
2005 *
2006 * Returns: RAMBlock (or NULL if not found)
2007 */
2008RAMBlock *qemu_ram_block_by_name(const char *name)
2009{
2010 RAMBlock *block;
2011
2012 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
2013 if (!strcmp(name, block->idstr)) {
2014 return block;
2015 }
2016 }
2017
2018 return NULL;
2019}
2020
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00002021/* Some of the softmmu routines need to translate from a host pointer
2022 (typically a TLB entry) back to a ram offset. */
2023MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
2024{
2025 RAMBlock *block;
2026 ram_addr_t offset; /* Not used */
2027
2028 block = qemu_ram_block_from_host(ptr, false, ram_addr, &offset);
2029
2030 if (!block) {
2031 return NULL;
2032 }
2033
2034 return block->mr;
Marcelo Tosattie8902612010-10-11 15:31:19 -03002035}
Alex Williamsonf471a172010-06-11 11:11:42 -06002036
Paolo Bonzini49b24af2015-12-16 10:30:47 +01002037/* Called within RCU critical section. */
Avi Kivitya8170e52012-10-23 12:30:10 +02002038static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002039 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00002040{
Juan Quintela52159192013-10-08 12:44:04 +02002041 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002042 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00002043 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002044 switch (size) {
2045 case 1:
Gonglei3655cb92016-02-20 10:35:20 +08002046 stb_p(qemu_get_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002047 break;
2048 case 2:
Gonglei3655cb92016-02-20 10:35:20 +08002049 stw_p(qemu_get_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002050 break;
2051 case 4:
Gonglei3655cb92016-02-20 10:35:20 +08002052 stl_p(qemu_get_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002053 break;
2054 default:
2055 abort();
2056 }
Paolo Bonzini58d27072015-03-23 11:56:01 +01002057 /* Set both VGA and migration bits for simplicity and to remove
2058 * the notdirty callback faster.
2059 */
2060 cpu_physical_memory_set_dirty_range(ram_addr, size,
2061 DIRTY_CLIENTS_NOCODE);
bellardf23db162005-08-21 19:12:28 +00002062 /* we remove the notdirty callback only if the code has been
2063 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02002064 if (!cpu_physical_memory_is_clean(ram_addr)) {
Peter Crosthwaitebcae01e2015-09-10 22:39:42 -07002065 tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
Andreas Färber4917cf42013-05-27 05:17:50 +02002066 }
bellard1ccde1c2004-02-06 19:46:14 +00002067}
2068
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002069static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
2070 unsigned size, bool is_write)
2071{
2072 return is_write;
2073}
2074
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002075static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002076 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002077 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002078 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00002079};
2080
pbrook0f459d12008-06-09 00:20:13 +00002081/* Generate a debug exception if a watchpoint has been hit. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002082static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
pbrook0f459d12008-06-09 00:20:13 +00002083{
Andreas Färber93afead2013-08-26 03:41:01 +02002084 CPUState *cpu = current_cpu;
Sergey Fedorov568496c2016-02-11 11:17:32 +00002085 CPUClass *cc = CPU_GET_CLASS(cpu);
Andreas Färber93afead2013-08-26 03:41:01 +02002086 CPUArchState *env = cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00002087 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00002088 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00002089 CPUWatchpoint *wp;
Emilio G. Cota89fee742016-04-07 13:19:22 -04002090 uint32_t cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00002091
Andreas Färberff4700b2013-08-26 18:23:18 +02002092 if (cpu->watchpoint_hit) {
aliguori06d55cc2008-11-18 20:24:06 +00002093 /* We re-entered the check after replacing the TB. Now raise
2094 * the debug interrupt so that it will trigger after the
2095 * current instruction. */
Andreas Färber93afead2013-08-26 03:41:01 +02002096 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00002097 return;
2098 }
Andreas Färber93afead2013-08-26 03:41:01 +02002099 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Andreas Färberff4700b2013-08-26 18:23:18 +02002100 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Peter Maydell05068c02014-09-12 14:06:48 +01002101 if (cpu_watchpoint_address_matches(wp, vaddr, len)
2102 && (wp->flags & flags)) {
Peter Maydell08225672014-09-12 14:06:48 +01002103 if (flags == BP_MEM_READ) {
2104 wp->flags |= BP_WATCHPOINT_HIT_READ;
2105 } else {
2106 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
2107 }
2108 wp->hitaddr = vaddr;
Peter Maydell66b9b432015-04-26 16:49:24 +01002109 wp->hitattrs = attrs;
Andreas Färberff4700b2013-08-26 18:23:18 +02002110 if (!cpu->watchpoint_hit) {
Sergey Fedorov568496c2016-02-11 11:17:32 +00002111 if (wp->flags & BP_CPU &&
2112 !cc->debug_check_watchpoint(cpu, wp)) {
2113 wp->flags &= ~BP_WATCHPOINT_HIT;
2114 continue;
2115 }
Andreas Färberff4700b2013-08-26 18:23:18 +02002116 cpu->watchpoint_hit = wp;
Andreas Färber239c51a2013-09-01 17:12:23 +02002117 tb_check_watchpoint(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002118 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
Andreas Färber27103422013-08-26 08:31:06 +02002119 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +02002120 cpu_loop_exit(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002121 } else {
2122 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
Andreas Färber648f0342013-09-01 17:43:17 +02002123 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
Andreas Färber0ea8cb82013-09-03 02:12:23 +02002124 cpu_resume_from_signal(cpu, NULL);
aliguori6e140f22008-11-18 20:37:55 +00002125 }
aliguori06d55cc2008-11-18 20:24:06 +00002126 }
aliguori6e140f22008-11-18 20:37:55 +00002127 } else {
2128 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00002129 }
2130 }
2131}
2132
pbrook6658ffb2007-03-16 23:58:11 +00002133/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2134 so these check for a hit then pass through to the normal out-of-line
2135 phys routines. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002136static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
2137 unsigned size, MemTxAttrs attrs)
pbrook6658ffb2007-03-16 23:58:11 +00002138{
Peter Maydell66b9b432015-04-26 16:49:24 +01002139 MemTxResult res;
2140 uint64_t data;
Peter Maydell79ed0412016-01-21 14:15:06 +00002141 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2142 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
pbrook6658ffb2007-03-16 23:58:11 +00002143
Peter Maydell66b9b432015-04-26 16:49:24 +01002144 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
Avi Kivity1ec9b902012-01-02 12:47:48 +02002145 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04002146 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002147 data = address_space_ldub(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002148 break;
2149 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002150 data = address_space_lduw(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002151 break;
2152 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002153 data = address_space_ldl(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002154 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02002155 default: abort();
2156 }
Peter Maydell66b9b432015-04-26 16:49:24 +01002157 *pdata = data;
2158 return res;
2159}
2160
2161static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
2162 uint64_t val, unsigned size,
2163 MemTxAttrs attrs)
2164{
2165 MemTxResult res;
Peter Maydell79ed0412016-01-21 14:15:06 +00002166 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2167 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
Peter Maydell66b9b432015-04-26 16:49:24 +01002168
2169 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
2170 switch (size) {
2171 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002172 address_space_stb(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002173 break;
2174 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002175 address_space_stw(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002176 break;
2177 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002178 address_space_stl(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002179 break;
2180 default: abort();
2181 }
2182 return res;
pbrook6658ffb2007-03-16 23:58:11 +00002183}
2184
Avi Kivity1ec9b902012-01-02 12:47:48 +02002185static const MemoryRegionOps watch_mem_ops = {
Peter Maydell66b9b432015-04-26 16:49:24 +01002186 .read_with_attrs = watch_mem_read,
2187 .write_with_attrs = watch_mem_write,
Avi Kivity1ec9b902012-01-02 12:47:48 +02002188 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00002189};
pbrook6658ffb2007-03-16 23:58:11 +00002190
Peter Maydellf25a49e2015-04-26 16:49:24 +01002191static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2192 unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002193{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002194 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002195 uint8_t buf[8];
Peter Maydell5c9eb022015-04-26 16:49:24 +01002196 MemTxResult res;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002197
blueswir1db7b5422007-05-26 17:36:03 +00002198#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002199 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002200 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00002201#endif
Peter Maydell5c9eb022015-04-26 16:49:24 +01002202 res = address_space_read(subpage->as, addr + subpage->base,
2203 attrs, buf, len);
2204 if (res) {
2205 return res;
Peter Maydellf25a49e2015-04-26 16:49:24 +01002206 }
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002207 switch (len) {
2208 case 1:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002209 *data = ldub_p(buf);
2210 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002211 case 2:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002212 *data = lduw_p(buf);
2213 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002214 case 4:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002215 *data = ldl_p(buf);
2216 return MEMTX_OK;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002217 case 8:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002218 *data = ldq_p(buf);
2219 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002220 default:
2221 abort();
2222 }
blueswir1db7b5422007-05-26 17:36:03 +00002223}
2224
Peter Maydellf25a49e2015-04-26 16:49:24 +01002225static MemTxResult subpage_write(void *opaque, hwaddr addr,
2226 uint64_t value, unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002227{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002228 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002229 uint8_t buf[8];
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002230
blueswir1db7b5422007-05-26 17:36:03 +00002231#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002232 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002233 " value %"PRIx64"\n",
2234 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00002235#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002236 switch (len) {
2237 case 1:
2238 stb_p(buf, value);
2239 break;
2240 case 2:
2241 stw_p(buf, value);
2242 break;
2243 case 4:
2244 stl_p(buf, value);
2245 break;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002246 case 8:
2247 stq_p(buf, value);
2248 break;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002249 default:
2250 abort();
2251 }
Peter Maydell5c9eb022015-04-26 16:49:24 +01002252 return address_space_write(subpage->as, addr + subpage->base,
2253 attrs, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00002254}
2255
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002256static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08002257 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002258{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002259 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002260#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002261 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002262 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002263#endif
2264
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002265 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08002266 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002267}
2268
Avi Kivity70c68e42012-01-02 12:32:48 +02002269static const MemoryRegionOps subpage_ops = {
Peter Maydellf25a49e2015-04-26 16:49:24 +01002270 .read_with_attrs = subpage_read,
2271 .write_with_attrs = subpage_write,
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002272 .impl.min_access_size = 1,
2273 .impl.max_access_size = 8,
2274 .valid.min_access_size = 1,
2275 .valid.max_access_size = 8,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002276 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02002277 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00002278};
2279
Anthony Liguoric227f092009-10-01 16:12:16 -05002280static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002281 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00002282{
2283 int idx, eidx;
2284
2285 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2286 return -1;
2287 idx = SUBPAGE_IDX(start);
2288 eidx = SUBPAGE_IDX(end);
2289#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002290 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2291 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00002292#endif
blueswir1db7b5422007-05-26 17:36:03 +00002293 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02002294 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00002295 }
2296
2297 return 0;
2298}
2299
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002300static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00002301{
Anthony Liguoric227f092009-10-01 16:12:16 -05002302 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00002303
Anthony Liguori7267c092011-08-20 22:09:37 -05002304 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00002305
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002306 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00002307 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002308 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002309 NULL, TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02002310 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00002311#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002312 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2313 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00002314#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02002315 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00002316
2317 return mmio;
2318}
2319
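/* Illustrative sketch, not part of the original file: how one guest page could
 * be carved into two subpage sections using the helpers above.  The section
 * indices sec_a and sec_b are hypothetical values that would come from
 * phys_section_add(). */
static void __attribute__((unused)) example_split_page(AddressSpace *as,
                                                       hwaddr page_base,
                                                       uint16_t sec_a,
                                                       uint16_t sec_b)
{
    subpage_t *sp = subpage_init(as, page_base);

    /* First half of the page dispatches to section A, second half to B. */
    subpage_register(sp, 0, TARGET_PAGE_SIZE / 2 - 1, sec_a);
    subpage_register(sp, TARGET_PAGE_SIZE / 2, TARGET_PAGE_SIZE - 1, sec_b);
}
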
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002320static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2321 MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02002322{
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002323 assert(as);
Avi Kivity5312bd82012-02-12 18:32:55 +02002324 MemoryRegionSection section = {
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002325 .address_space = as,
Avi Kivity5312bd82012-02-12 18:32:55 +02002326 .mr = mr,
2327 .offset_within_address_space = 0,
2328 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002329 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02002330 };
2331
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002332 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02002333}
2334
Peter Maydella54c87b2016-01-21 14:15:05 +00002335MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
Avi Kivityaa102232012-03-08 17:06:55 +02002336{
Peter Maydella54c87b2016-01-21 14:15:05 +00002337 int asidx = cpu_asidx_from_attrs(cpu, attrs);
2338 CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
Peter Maydell32857f42015-10-01 15:29:50 +01002339 AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002340 MemoryRegionSection *sections = d->map.sections;
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002341
2342 return sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02002343}
2344
Avi Kivitye9179ce2009-06-14 11:38:52 +03002345static void io_mem_init(void)
2346{
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002347 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002348 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002349 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002350 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002351 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002352 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002353 NULL, UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03002354}
2355
Avi Kivityac1970f2012-10-03 16:22:53 +02002356static void mem_begin(MemoryListener *listener)
2357{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002358 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002359 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2360 uint16_t n;
2361
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002362 n = dummy_section(&d->map, as, &io_mem_unassigned);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002363 assert(n == PHYS_SECTION_UNASSIGNED);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002364 n = dummy_section(&d->map, as, &io_mem_notdirty);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002365 assert(n == PHYS_SECTION_NOTDIRTY);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002366 n = dummy_section(&d->map, as, &io_mem_rom);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002367 assert(n == PHYS_SECTION_ROM);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002368 n = dummy_section(&d->map, as, &io_mem_watch);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002369 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02002370
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02002371 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02002372 d->as = as;
2373 as->next_dispatch = d;
2374}
2375
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002376static void address_space_dispatch_free(AddressSpaceDispatch *d)
2377{
2378 phys_sections_free(&d->map);
2379 g_free(d);
2380}
2381
Paolo Bonzini00752702013-05-29 12:13:54 +02002382static void mem_commit(MemoryListener *listener)
2383{
2384 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02002385 AddressSpaceDispatch *cur = as->dispatch;
2386 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02002387
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002388 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02002389
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002390 atomic_rcu_set(&as->dispatch, next);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002391 if (cur) {
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002392 call_rcu(cur, address_space_dispatch_free, rcu);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002393 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02002394}
2395
Avi Kivity1d711482012-10-02 18:54:45 +02002396static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02002397{
Peter Maydell32857f42015-10-01 15:29:50 +01002398 CPUAddressSpace *cpuas;
2399 AddressSpaceDispatch *d;
Avi Kivity117712c2012-02-12 21:23:17 +02002400
 2401 /* Since each CPU stores RAM addresses in its TLB cache, we must
 2402 reset the modified entries. */
Peter Maydell32857f42015-10-01 15:29:50 +01002403 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
2404 cpu_reloading_memory_map();
2405 /* The CPU and TLB are protected by the iothread lock.
2406 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2407 * may have split the RCU critical section.
2408 */
2409 d = atomic_rcu_read(&cpuas->as->dispatch);
2410 cpuas->memory_dispatch = d;
2411 tlb_flush(cpuas->cpu, 1);
Avi Kivity50c1e142012-02-08 21:36:02 +02002412}
2413
Avi Kivityac1970f2012-10-03 16:22:53 +02002414void address_space_init_dispatch(AddressSpace *as)
2415{
Paolo Bonzini00752702013-05-29 12:13:54 +02002416 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002417 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002418 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02002419 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02002420 .region_add = mem_add,
2421 .region_nop = mem_add,
2422 .priority = 0,
2423 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002424 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02002425}
2426
Paolo Bonzini6e48e8f2015-02-10 10:25:44 -07002427void address_space_unregister(AddressSpace *as)
2428{
2429 memory_listener_unregister(&as->dispatch_listener);
2430}
2431
Avi Kivity83f3c252012-10-07 12:59:55 +02002432void address_space_destroy_dispatch(AddressSpace *as)
2433{
2434 AddressSpaceDispatch *d = as->dispatch;
2435
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002436 atomic_rcu_set(&as->dispatch, NULL);
2437 if (d) {
2438 call_rcu(d, address_space_dispatch_free, rcu);
2439 }
Avi Kivity83f3c252012-10-07 12:59:55 +02002440}
2441
Avi Kivity62152b82011-07-26 14:26:14 +03002442static void memory_map_init(void)
2443{
Anthony Liguori7267c092011-08-20 22:09:37 -05002444 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01002445
Paolo Bonzini57271d62013-11-07 17:14:37 +01002446 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002447 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03002448
Anthony Liguori7267c092011-08-20 22:09:37 -05002449 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02002450 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2451 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002452 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity62152b82011-07-26 14:26:14 +03002453}
2454
2455MemoryRegion *get_system_memory(void)
2456{
2457 return system_memory;
2458}
2459
Avi Kivity309cb472011-08-08 16:09:03 +03002460MemoryRegion *get_system_io(void)
2461{
2462 return system_io;
2463}
2464
pbrooke2eef172008-06-08 01:09:01 +00002465#endif /* !defined(CONFIG_USER_ONLY) */
2466
bellard13eb76e2004-01-24 15:23:36 +00002467/* physical memory access (slow version, mainly for debug) */
2468#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02002469int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00002470 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002471{
2472 int l, flags;
2473 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00002474 void * p;
bellard13eb76e2004-01-24 15:23:36 +00002475
2476 while (len > 0) {
2477 page = addr & TARGET_PAGE_MASK;
2478 l = (page + TARGET_PAGE_SIZE) - addr;
2479 if (l > len)
2480 l = len;
2481 flags = page_get_flags(page);
2482 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00002483 return -1;
bellard13eb76e2004-01-24 15:23:36 +00002484 if (is_write) {
2485 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00002486 return -1;
bellard579a97f2007-11-11 14:26:47 +00002487 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002488 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00002489 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002490 memcpy(p, buf, l);
2491 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00002492 } else {
2493 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00002494 return -1;
bellard579a97f2007-11-11 14:26:47 +00002495 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002496 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00002497 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002498 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00002499 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00002500 }
2501 len -= l;
2502 buf += l;
2503 addr += l;
2504 }
Paul Brooka68fe892010-03-01 00:08:59 +00002505 return 0;
bellard13eb76e2004-01-24 15:23:36 +00002506}
bellard8df1cd02005-01-28 22:37:22 +00002507
bellard13eb76e2004-01-24 15:23:36 +00002508#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002509
Paolo Bonzini845b6212015-03-23 11:45:53 +01002510static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002511 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002512{
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002513 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
2514 /* No early return if dirty_log_mask is or becomes 0, because
2515 * cpu_physical_memory_set_dirty_range will still call
2516 * xen_modified_memory.
2517 */
2518 if (dirty_log_mask) {
2519 dirty_log_mask =
2520 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002521 }
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002522 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2523 tb_invalidate_phys_range(addr, addr + length);
2524 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2525 }
2526 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002527}
2528
Richard Henderson23326162013-07-08 14:55:59 -07002529static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02002530{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02002531 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07002532
2533 /* Regions are assumed to support 1-4 byte accesses unless
2534 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07002535 if (access_size_max == 0) {
2536 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002537 }
Richard Henderson23326162013-07-08 14:55:59 -07002538
2539 /* Bound the maximum access by the alignment of the address. */
2540 if (!mr->ops->impl.unaligned) {
2541 unsigned align_size_max = addr & -addr;
2542 if (align_size_max != 0 && align_size_max < access_size_max) {
2543 access_size_max = align_size_max;
2544 }
2545 }
2546
2547 /* Don't attempt accesses larger than the maximum. */
2548 if (l > access_size_max) {
2549 l = access_size_max;
2550 }
Peter Maydell6554f5c2015-07-24 13:33:10 +01002551 l = pow2floor(l);
Richard Henderson23326162013-07-08 14:55:59 -07002552
2553 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002554}
2555
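/* Worked example (illustrative, not part of the original file): for an MMIO
 * region whose ops declare valid.max_access_size = 4 and leave impl.unaligned
 * unset, an 8-byte access at address 0x1006 is first clamped to 4 bytes, then
 * limited by the address alignment (0x1006 & -0x1006 == 2), and finally
 * rounded down to a power of two, so memory_access_size() returns 2 and the
 * caller splits the access into smaller pieces. */
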
Jan Kiszka4840f102015-06-18 18:47:22 +02002556static bool prepare_mmio_access(MemoryRegion *mr)
Paolo Bonzini125b3802015-06-18 18:47:21 +02002557{
Jan Kiszka4840f102015-06-18 18:47:22 +02002558 bool unlocked = !qemu_mutex_iothread_locked();
2559 bool release_lock = false;
2560
2561 if (unlocked && mr->global_locking) {
2562 qemu_mutex_lock_iothread();
2563 unlocked = false;
2564 release_lock = true;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002565 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002566 if (mr->flush_coalesced_mmio) {
2567 if (unlocked) {
2568 qemu_mutex_lock_iothread();
2569 }
2570 qemu_flush_coalesced_mmio_buffer();
2571 if (unlocked) {
2572 qemu_mutex_unlock_iothread();
2573 }
2574 }
2575
2576 return release_lock;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002577}
2578
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002579/* Called within RCU critical section. */
2580static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
2581 MemTxAttrs attrs,
2582 const uint8_t *buf,
2583 int len, hwaddr addr1,
2584 hwaddr l, MemoryRegion *mr)
bellard13eb76e2004-01-24 15:23:36 +00002585{
bellard13eb76e2004-01-24 15:23:36 +00002586 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002587 uint64_t val;
Peter Maydell3b643492015-04-26 16:49:23 +01002588 MemTxResult result = MEMTX_OK;
Jan Kiszka4840f102015-06-18 18:47:22 +02002589 bool release_lock = false;
ths3b46e622007-09-17 08:09:54 +00002590
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002591 for (;;) {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002592 if (!memory_access_is_direct(mr, true)) {
2593 release_lock |= prepare_mmio_access(mr);
2594 l = memory_access_size(mr, l, addr1);
2595 /* XXX: could force current_cpu to NULL to avoid
2596 potential bugs */
2597 switch (l) {
2598 case 8:
2599 /* 64 bit write access */
2600 val = ldq_p(buf);
2601 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2602 attrs);
2603 break;
2604 case 4:
2605 /* 32 bit write access */
2606 val = ldl_p(buf);
2607 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2608 attrs);
2609 break;
2610 case 2:
2611 /* 16 bit write access */
2612 val = lduw_p(buf);
2613 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2614 attrs);
2615 break;
2616 case 1:
2617 /* 8 bit write access */
2618 val = ldub_p(buf);
2619 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2620 attrs);
2621 break;
2622 default:
2623 abort();
bellard13eb76e2004-01-24 15:23:36 +00002624 }
2625 } else {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002626 addr1 += memory_region_get_ram_addr(mr);
2627 /* RAM case */
Gonglei3655cb92016-02-20 10:35:20 +08002628 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002629 memcpy(ptr, buf, l);
2630 invalidate_and_set_dirty(mr, addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002631 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002632
2633 if (release_lock) {
2634 qemu_mutex_unlock_iothread();
2635 release_lock = false;
2636 }
2637
bellard13eb76e2004-01-24 15:23:36 +00002638 len -= l;
2639 buf += l;
2640 addr += l;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002641
2642 if (!len) {
2643 break;
2644 }
2645
2646 l = len;
2647 mr = address_space_translate(as, addr, &addr1, &l, true);
bellard13eb76e2004-01-24 15:23:36 +00002648 }
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002649
Peter Maydell3b643492015-04-26 16:49:23 +01002650 return result;
bellard13eb76e2004-01-24 15:23:36 +00002651}
bellard8df1cd02005-01-28 22:37:22 +00002652
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002653MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2654 const uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002655{
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002656 hwaddr l;
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002657 hwaddr addr1;
2658 MemoryRegion *mr;
2659 MemTxResult result = MEMTX_OK;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002660
2661 if (len > 0) {
2662 rcu_read_lock();
2663 l = len;
2664 mr = address_space_translate(as, addr, &addr1, &l, true);
2665 result = address_space_write_continue(as, addr, attrs, buf, len,
2666 addr1, l, mr);
2667 rcu_read_unlock();
2668 }
2669
2670 return result;
2671}
2672
2673/* Called within RCU critical section. */
2674MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
2675 MemTxAttrs attrs, uint8_t *buf,
2676 int len, hwaddr addr1, hwaddr l,
2677 MemoryRegion *mr)
2678{
2679 uint8_t *ptr;
2680 uint64_t val;
2681 MemTxResult result = MEMTX_OK;
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002682 bool release_lock = false;
2683
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002684 for (;;) {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002685 if (!memory_access_is_direct(mr, false)) {
2686 /* I/O case */
2687 release_lock |= prepare_mmio_access(mr);
2688 l = memory_access_size(mr, l, addr1);
2689 switch (l) {
2690 case 8:
2691 /* 64 bit read access */
2692 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2693 attrs);
2694 stq_p(buf, val);
2695 break;
2696 case 4:
2697 /* 32 bit read access */
2698 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2699 attrs);
2700 stl_p(buf, val);
2701 break;
2702 case 2:
2703 /* 16 bit read access */
2704 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2705 attrs);
2706 stw_p(buf, val);
2707 break;
2708 case 1:
2709 /* 8 bit read access */
2710 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2711 attrs);
2712 stb_p(buf, val);
2713 break;
2714 default:
2715 abort();
2716 }
2717 } else {
2718 /* RAM case */
Fam Zheng8e41fb62016-03-01 14:18:21 +08002719 ptr = qemu_get_ram_ptr(mr->ram_block,
2720 memory_region_get_ram_addr(mr) + addr1);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002721 memcpy(buf, ptr, l);
2722 }
2723
2724 if (release_lock) {
2725 qemu_mutex_unlock_iothread();
2726 release_lock = false;
2727 }
2728
2729 len -= l;
2730 buf += l;
2731 addr += l;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002732
2733 if (!len) {
2734 break;
2735 }
2736
2737 l = len;
2738 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002739 }
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002740
2741 return result;
2742}
2743
Paolo Bonzini3cc8f882015-12-09 10:34:13 +01002744MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
2745 MemTxAttrs attrs, uint8_t *buf, int len)
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002746{
2747 hwaddr l;
2748 hwaddr addr1;
2749 MemoryRegion *mr;
2750 MemTxResult result = MEMTX_OK;
2751
2752 if (len > 0) {
2753 rcu_read_lock();
2754 l = len;
2755 mr = address_space_translate(as, addr, &addr1, &l, false);
2756 result = address_space_read_continue(as, addr, attrs, buf, len,
2757 addr1, l, mr);
2758 rcu_read_unlock();
2759 }
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002760
2761 return result;
Avi Kivityac1970f2012-10-03 16:22:53 +02002762}
2763
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002764MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2765 uint8_t *buf, int len, bool is_write)
2766{
2767 if (is_write) {
2768 return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
2769 } else {
2770 return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
2771 }
2772}
Avi Kivityac1970f2012-10-03 16:22:53 +02002773
Avi Kivitya8170e52012-10-23 12:30:10 +02002774void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002775 int len, int is_write)
2776{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002777 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2778 buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002779}
2780
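/* Illustrative sketch, not part of the original file: a caller issuing a
 * guarded write followed by a read-back through an address space.  The
 * buffer contents, the guest address guest_buf_addr and the error handling
 * are hypothetical. */
static void __attribute__((unused)) example_rw_roundtrip(AddressSpace *as,
                                                         hwaddr guest_buf_addr)
{
    uint8_t out[16] = { 0 };
    uint8_t in[16];
    MemTxResult res;

    /* Write 16 bytes with default (unspecified) transaction attributes. */
    res = address_space_write(as, guest_buf_addr, MEMTXATTRS_UNSPECIFIED,
                              out, sizeof(out));
    if (res != MEMTX_OK) {
        return; /* e.g. the write hit an unassigned or erroring region */
    }

    /* Read the same range back; address_space_rw() with is_write == false
     * simply dispatches to address_space_read(). */
    res = address_space_rw(as, guest_buf_addr, MEMTXATTRS_UNSPECIFIED,
                           in, sizeof(in), false);
    (void)res; /* a real caller would check this result as well */
}
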
Alexander Graf582b55a2013-12-11 14:17:44 +01002781enum write_rom_type {
2782 WRITE_DATA,
2783 FLUSH_CACHE,
2784};
2785
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002786static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
Alexander Graf582b55a2013-12-11 14:17:44 +01002787 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
bellardd0ecd2a2006-04-23 17:14:48 +00002788{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002789 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002790 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002791 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002792 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002793
Paolo Bonzini41063e12015-03-18 14:21:43 +01002794 rcu_read_lock();
bellardd0ecd2a2006-04-23 17:14:48 +00002795 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002796 l = len;
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002797 mr = address_space_translate(as, addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002798
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002799 if (!(memory_region_is_ram(mr) ||
2800 memory_region_is_romd(mr))) {
Paolo Bonzinib242e0e2015-07-04 00:24:51 +02002801 l = memory_access_size(mr, l, addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002802 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002803 addr1 += memory_region_get_ram_addr(mr);
bellardd0ecd2a2006-04-23 17:14:48 +00002804 /* ROM/RAM case */
Gonglei3655cb92016-02-20 10:35:20 +08002805 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
Alexander Graf582b55a2013-12-11 14:17:44 +01002806 switch (type) {
2807 case WRITE_DATA:
2808 memcpy(ptr, buf, l);
Paolo Bonzini845b6212015-03-23 11:45:53 +01002809 invalidate_and_set_dirty(mr, addr1, l);
Alexander Graf582b55a2013-12-11 14:17:44 +01002810 break;
2811 case FLUSH_CACHE:
2812 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2813 break;
2814 }
bellardd0ecd2a2006-04-23 17:14:48 +00002815 }
2816 len -= l;
2817 buf += l;
2818 addr += l;
2819 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002820 rcu_read_unlock();
bellardd0ecd2a2006-04-23 17:14:48 +00002821}
2822
Alexander Graf582b55a2013-12-11 14:17:44 +01002823/* used for ROM loading : can write in RAM and ROM */
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002824void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
Alexander Graf582b55a2013-12-11 14:17:44 +01002825 const uint8_t *buf, int len)
2826{
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002827 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
Alexander Graf582b55a2013-12-11 14:17:44 +01002828}
2829
2830void cpu_flush_icache_range(hwaddr start, int len)
2831{
2832 /*
2833 * This function should do the same thing as an icache flush that was
2834 * triggered from within the guest. For TCG we are always cache coherent,
2835 * so there is no need to flush anything. For KVM / Xen we need to flush
2836 * the host's instruction cache at least.
2837 */
2838 if (tcg_enabled()) {
2839 return;
2840 }
2841
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002842 cpu_physical_memory_write_rom_internal(&address_space_memory,
2843 start, NULL, len, FLUSH_CACHE);
Alexander Graf582b55a2013-12-11 14:17:44 +01002844}
2845
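/* Illustrative sketch, not part of the original file: loading a firmware blob
 * into guest memory that may be mapped as ROM, then making sure the host
 * instruction cache is coherent before the guest executes it.  The blob
 * pointer, size and load address are hypothetical. */
static void __attribute__((unused)) example_load_firmware(const uint8_t *blob,
                                                          int size,
                                                          hwaddr load_addr)
{
    /* Writes into ROM/ROMD backing store, which ordinary
     * address_space_write() calls would silently ignore. */
    cpu_physical_memory_write_rom(&address_space_memory, load_addr, blob, size);
    /* No-op under TCG, but flushes the host icache for KVM/Xen. */
    cpu_flush_icache_range(load_addr, size);
}
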
aliguori6d16c2f2009-01-22 16:59:11 +00002846typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002847 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002848 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002849 hwaddr addr;
2850 hwaddr len;
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002851 bool in_use;
aliguori6d16c2f2009-01-22 16:59:11 +00002852} BounceBuffer;
2853
2854static BounceBuffer bounce;
2855
aliguoriba223c22009-01-22 16:59:16 +00002856typedef struct MapClient {
Fam Zhenge95205e2015-03-16 17:03:37 +08002857 QEMUBH *bh;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002858 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002859} MapClient;
2860
Fam Zheng38e047b2015-03-16 17:03:35 +08002861QemuMutex map_client_list_lock;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002862static QLIST_HEAD(map_client_list, MapClient) map_client_list
2863 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002864
Fam Zhenge95205e2015-03-16 17:03:37 +08002865static void cpu_unregister_map_client_do(MapClient *client)
aliguoriba223c22009-01-22 16:59:16 +00002866{
Blue Swirl72cf2d42009-09-12 07:36:22 +00002867 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002868 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002869}
2870
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002871static void cpu_notify_map_clients_locked(void)
aliguoriba223c22009-01-22 16:59:16 +00002872{
2873 MapClient *client;
2874
Blue Swirl72cf2d42009-09-12 07:36:22 +00002875 while (!QLIST_EMPTY(&map_client_list)) {
2876 client = QLIST_FIRST(&map_client_list);
Fam Zhenge95205e2015-03-16 17:03:37 +08002877 qemu_bh_schedule(client->bh);
2878 cpu_unregister_map_client_do(client);
aliguoriba223c22009-01-22 16:59:16 +00002879 }
2880}
2881
Fam Zhenge95205e2015-03-16 17:03:37 +08002882void cpu_register_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002883{
2884 MapClient *client = g_malloc(sizeof(*client));
2885
Fam Zheng38e047b2015-03-16 17:03:35 +08002886 qemu_mutex_lock(&map_client_list_lock);
Fam Zhenge95205e2015-03-16 17:03:37 +08002887 client->bh = bh;
bellardd0ecd2a2006-04-23 17:14:48 +00002888 QLIST_INSERT_HEAD(&map_client_list, client, link);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002889 if (!atomic_read(&bounce.in_use)) {
2890 cpu_notify_map_clients_locked();
2891 }
Fam Zheng38e047b2015-03-16 17:03:35 +08002892 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002893}
2894
Fam Zheng38e047b2015-03-16 17:03:35 +08002895void cpu_exec_init_all(void)
2896{
2897 qemu_mutex_init(&ram_list.mutex);
Fam Zheng38e047b2015-03-16 17:03:35 +08002898 io_mem_init();
Paolo Bonzini680a4782015-11-02 09:23:52 +01002899 memory_map_init();
Fam Zheng38e047b2015-03-16 17:03:35 +08002900 qemu_mutex_init(&map_client_list_lock);
2901}
2902
Fam Zhenge95205e2015-03-16 17:03:37 +08002903void cpu_unregister_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002904{
Fam Zhenge95205e2015-03-16 17:03:37 +08002905 MapClient *client;
bellardd0ecd2a2006-04-23 17:14:48 +00002906
Fam Zhenge95205e2015-03-16 17:03:37 +08002907 qemu_mutex_lock(&map_client_list_lock);
2908 QLIST_FOREACH(client, &map_client_list, link) {
2909 if (client->bh == bh) {
2910 cpu_unregister_map_client_do(client);
2911 break;
2912 }
2913 }
2914 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002915}
2916
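/* Illustrative sketch, not part of the original file: a device model whose
 * address_space_map() call failed (bounce buffer busy) can queue a bottom
 * half to be scheduled once the buffer is released.  ExampleDMAState and
 * example_dma_retry() are hypothetical. */
typedef struct ExampleDMAState {
    QEMUBH *bh;
} ExampleDMAState;

static void __attribute__((unused)) example_dma_retry(void *opaque)
{
    /* Scheduled from cpu_notify_map_clients(): the bounce buffer may be
     * free again, so re-issue the address_space_map() that failed here. */
}

static void __attribute__((unused)) example_dma_wait_for_bounce(ExampleDMAState *s)
{
    s->bh = qemu_bh_new(example_dma_retry, s);
    cpu_register_map_client(s->bh);
    /* If the transfer is cancelled before the retry runs, drop the
     * registration again with cpu_unregister_map_client(s->bh). */
}
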
2917static void cpu_notify_map_clients(void)
2918{
Fam Zheng38e047b2015-03-16 17:03:35 +08002919 qemu_mutex_lock(&map_client_list_lock);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002920 cpu_notify_map_clients_locked();
Fam Zheng38e047b2015-03-16 17:03:35 +08002921 qemu_mutex_unlock(&map_client_list_lock);
aliguori6d16c2f2009-01-22 16:59:11 +00002922}
2923
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002924bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2925{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002926 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002927 hwaddr l, xlat;
2928
Paolo Bonzini41063e12015-03-18 14:21:43 +01002929 rcu_read_lock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002930 while (len > 0) {
2931 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002932 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2933 if (!memory_access_is_direct(mr, is_write)) {
2934 l = memory_access_size(mr, l, addr);
2935 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002936 return false;
2937 }
2938 }
2939
2940 len -= l;
2941 addr += l;
2942 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002943 rcu_read_unlock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002944 return true;
2945}
2946
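/* Illustrative sketch, not part of the original file: probing the whole
 * destination range before committing to a DMA-style write, so a partial
 * transfer into a half-valid window is avoided.  The destination address,
 * buffer and length are hypothetical. */
static void __attribute__((unused)) example_checked_dma_write(AddressSpace *as,
                                                              hwaddr dst,
                                                              const uint8_t *buf,
                                                              int len)
{
    if (!address_space_access_valid(as, dst, len, true /* is_write */)) {
        return;
    }
    address_space_write(as, dst, MEMTXATTRS_UNSPECIFIED, buf, len);
}
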
aliguori6d16c2f2009-01-22 16:59:11 +00002947/* Map a physical memory region into a host virtual address.
2948 * May map a subset of the requested range, given by and returned in *plen.
2949 * May return NULL if resources needed to perform the mapping are exhausted.
2950 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002951 * Use cpu_register_map_client() to know when retrying the map operation is
2952 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002953 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002954void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002955 hwaddr addr,
2956 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002957 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002958{
Avi Kivitya8170e52012-10-23 12:30:10 +02002959 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002960 hwaddr done = 0;
2961 hwaddr l, xlat, base;
2962 MemoryRegion *mr, *this_mr;
2963 ram_addr_t raddr;
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01002964 void *ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00002965
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002966 if (len == 0) {
2967 return NULL;
2968 }
aliguori6d16c2f2009-01-22 16:59:11 +00002969
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002970 l = len;
Paolo Bonzini41063e12015-03-18 14:21:43 +01002971 rcu_read_lock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002972 mr = address_space_translate(as, addr, &xlat, &l, is_write);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002973
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002974 if (!memory_access_is_direct(mr, is_write)) {
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002975 if (atomic_xchg(&bounce.in_use, true)) {
Paolo Bonzini41063e12015-03-18 14:21:43 +01002976 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002977 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002978 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002979 /* Avoid unbounded allocations */
2980 l = MIN(l, TARGET_PAGE_SIZE);
2981 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002982 bounce.addr = addr;
2983 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002984
2985 memory_region_ref(mr);
2986 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002987 if (!is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002988 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2989 bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002990 }
aliguori6d16c2f2009-01-22 16:59:11 +00002991
Paolo Bonzini41063e12015-03-18 14:21:43 +01002992 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002993 *plen = l;
2994 return bounce.buffer;
2995 }
2996
2997 base = xlat;
2998 raddr = memory_region_get_ram_addr(mr);
2999
3000 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00003001 len -= l;
3002 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02003003 done += l;
3004 if (len == 0) {
3005 break;
3006 }
3007
3008 l = len;
3009 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
3010 if (this_mr != mr || xlat != base + done) {
3011 break;
3012 }
aliguori6d16c2f2009-01-22 16:59:11 +00003013 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02003014
Paolo Bonzinid3e71552013-06-28 17:33:29 +02003015 memory_region_ref(mr);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02003016 *plen = done;
Gonglei3655cb92016-02-20 10:35:20 +08003017 ptr = qemu_ram_ptr_length(mr->ram_block, raddr + base, plen);
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01003018 rcu_read_unlock();
3019
3020 return ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00003021}
3022
Avi Kivityac1970f2012-10-03 16:22:53 +02003023/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00003024 * Will also mark the memory as dirty if is_write == 1. access_len gives
3025 * the amount of memory that was actually read or written by the caller.
3026 */
Avi Kivitya8170e52012-10-23 12:30:10 +02003027void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
3028 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00003029{
3030 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02003031 MemoryRegion *mr;
3032 ram_addr_t addr1;
3033
3034 mr = qemu_ram_addr_from_host(buffer, &addr1);
3035 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00003036 if (is_write) {
Paolo Bonzini845b6212015-03-23 11:45:53 +01003037 invalidate_and_set_dirty(mr, addr1, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00003038 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003039 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003040 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003041 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02003042 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00003043 return;
3044 }
3045 if (is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01003046 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
3047 bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00003048 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00003049 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003050 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02003051 memory_region_unref(bounce.mr);
Fam Zhengc2cba0f2015-03-16 17:03:33 +08003052 atomic_mb_set(&bounce.in_use, false);
aliguoriba223c22009-01-22 16:59:16 +00003053 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00003054}
bellardd0ecd2a2006-04-23 17:14:48 +00003055
Avi Kivitya8170e52012-10-23 12:30:10 +02003056void *cpu_physical_memory_map(hwaddr addr,
3057 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02003058 int is_write)
3059{
3060 return address_space_map(&address_space_memory, addr, plen, is_write);
3061}
3062
Avi Kivitya8170e52012-10-23 12:30:10 +02003063void cpu_physical_memory_unmap(void *buffer, hwaddr len,
3064 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02003065{
3066 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
3067}
3068
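/* Illustrative sketch, not part of the original file: zero-copy access to a
 * guest buffer, honouring the partial-mapping and NULL-return semantics
 * documented above (the bounce-buffer fallback is retried later via
 * cpu_register_map_client()).  The guest address and length are
 * hypothetical. */
static void __attribute__((unused)) example_map_and_fill(AddressSpace *as,
                                                         hwaddr guest_addr,
                                                         hwaddr len)
{
    hwaddr mapped_len = len;
    void *host = address_space_map(as, guest_addr, &mapped_len, true);

    if (!host) {
        /* Resources exhausted (e.g. the bounce buffer is in use): register
         * a map client and retry later instead of spinning. */
        return;
    }
    /* Only mapped_len bytes are guaranteed contiguous; it may be < len. */
    memset(host, 0, mapped_len);
    address_space_unmap(as, host, mapped_len, true /* is_write */, mapped_len);
}
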
bellard8df1cd02005-01-28 22:37:22 +00003069/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003070static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
3071 MemTxAttrs attrs,
3072 MemTxResult *result,
3073 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003074{
bellard8df1cd02005-01-28 22:37:22 +00003075 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02003076 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003077 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003078 hwaddr l = 4;
3079 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003080 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003081 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003082
Paolo Bonzini41063e12015-03-18 14:21:43 +01003083 rcu_read_lock();
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003084 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003085 if (l < 4 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003086 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003087
bellard8df1cd02005-01-28 22:37:22 +00003088 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003089 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003090#if defined(TARGET_WORDS_BIGENDIAN)
3091 if (endian == DEVICE_LITTLE_ENDIAN) {
3092 val = bswap32(val);
3093 }
3094#else
3095 if (endian == DEVICE_BIG_ENDIAN) {
3096 val = bswap32(val);
3097 }
3098#endif
bellard8df1cd02005-01-28 22:37:22 +00003099 } else {
3100 /* RAM case */
Gonglei3655cb92016-02-20 10:35:20 +08003101 ptr = qemu_get_ram_ptr(mr->ram_block,
3102 (memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003103 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003104 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003105 switch (endian) {
3106 case DEVICE_LITTLE_ENDIAN:
3107 val = ldl_le_p(ptr);
3108 break;
3109 case DEVICE_BIG_ENDIAN:
3110 val = ldl_be_p(ptr);
3111 break;
3112 default:
3113 val = ldl_p(ptr);
3114 break;
3115 }
Peter Maydell50013112015-04-26 16:49:24 +01003116 r = MEMTX_OK;
3117 }
3118 if (result) {
3119 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003120 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003121 if (release_lock) {
3122 qemu_mutex_unlock_iothread();
3123 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003124 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003125 return val;
3126}
3127
Peter Maydell50013112015-04-26 16:49:24 +01003128uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
3129 MemTxAttrs attrs, MemTxResult *result)
3130{
3131 return address_space_ldl_internal(as, addr, attrs, result,
3132 DEVICE_NATIVE_ENDIAN);
3133}
3134
3135uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
3136 MemTxAttrs attrs, MemTxResult *result)
3137{
3138 return address_space_ldl_internal(as, addr, attrs, result,
3139 DEVICE_LITTLE_ENDIAN);
3140}
3141
3142uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
3143 MemTxAttrs attrs, MemTxResult *result)
3144{
3145 return address_space_ldl_internal(as, addr, attrs, result,
3146 DEVICE_BIG_ENDIAN);
3147}
3148
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003149uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003150{
Peter Maydell50013112015-04-26 16:49:24 +01003151 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003152}
3153
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003154uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003155{
Peter Maydell50013112015-04-26 16:49:24 +01003156 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003157}
3158
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003159uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003160{
Peter Maydell50013112015-04-26 16:49:24 +01003161 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003162}
3163
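/* Illustrative sketch, not part of the original file: reading a 32-bit
 * little-endian device register while distinguishing a bus error from a
 * valid value, which the convenience ldl_le_phys() wrapper above cannot do.
 * The register address and the all-ones error value are hypothetical. */
static uint32_t __attribute__((unused)) example_read_reg32(AddressSpace *as,
                                                           hwaddr reg_addr)
{
    MemTxResult res;
    uint32_t val = address_space_ldl_le(as, reg_addr, MEMTXATTRS_UNSPECIFIED,
                                        &res);
    return (res == MEMTX_OK) ? val : 0xffffffff;
}
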
bellard84b7b8e2005-11-28 21:19:04 +00003164/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003165static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
3166 MemTxAttrs attrs,
3167 MemTxResult *result,
3168 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00003169{
bellard84b7b8e2005-11-28 21:19:04 +00003170 uint8_t *ptr;
3171 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003172 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003173 hwaddr l = 8;
3174 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003175 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003176 bool release_lock = false;
bellard84b7b8e2005-11-28 21:19:04 +00003177
Paolo Bonzini41063e12015-03-18 14:21:43 +01003178 rcu_read_lock();
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003179 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003180 false);
3181 if (l < 8 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003182 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003183
bellard84b7b8e2005-11-28 21:19:04 +00003184 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003185 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
Paolo Bonzini968a5622013-05-24 17:58:37 +02003186#if defined(TARGET_WORDS_BIGENDIAN)
3187 if (endian == DEVICE_LITTLE_ENDIAN) {
3188 val = bswap64(val);
3189 }
3190#else
3191 if (endian == DEVICE_BIG_ENDIAN) {
3192 val = bswap64(val);
3193 }
3194#endif
bellard84b7b8e2005-11-28 21:19:04 +00003195 } else {
3196 /* RAM case */
Gonglei3655cb92016-02-20 10:35:20 +08003197 ptr = qemu_get_ram_ptr(mr->ram_block,
3198 (memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003199 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003200 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003201 switch (endian) {
3202 case DEVICE_LITTLE_ENDIAN:
3203 val = ldq_le_p(ptr);
3204 break;
3205 case DEVICE_BIG_ENDIAN:
3206 val = ldq_be_p(ptr);
3207 break;
3208 default:
3209 val = ldq_p(ptr);
3210 break;
3211 }
Peter Maydell50013112015-04-26 16:49:24 +01003212 r = MEMTX_OK;
3213 }
3214 if (result) {
3215 *result = r;
bellard84b7b8e2005-11-28 21:19:04 +00003216 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003217 if (release_lock) {
3218 qemu_mutex_unlock_iothread();
3219 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003220 rcu_read_unlock();
bellard84b7b8e2005-11-28 21:19:04 +00003221 return val;
3222}
3223
Peter Maydell50013112015-04-26 16:49:24 +01003224uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
3225 MemTxAttrs attrs, MemTxResult *result)
3226{
3227 return address_space_ldq_internal(as, addr, attrs, result,
3228 DEVICE_NATIVE_ENDIAN);
3229}
3230
3231uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
3232 MemTxAttrs attrs, MemTxResult *result)
3233{
3234 return address_space_ldq_internal(as, addr, attrs, result,
3235 DEVICE_LITTLE_ENDIAN);
3236}
3237
3238uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
3239 MemTxAttrs attrs, MemTxResult *result)
3240{
3241 return address_space_ldq_internal(as, addr, attrs, result,
3242 DEVICE_BIG_ENDIAN);
3243}
3244
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003245uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003246{
Peter Maydell50013112015-04-26 16:49:24 +01003247 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003248}
3249
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003250uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003251{
Peter Maydell50013112015-04-26 16:49:24 +01003252 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003253}
3254
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003255uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003256{
Peter Maydell50013112015-04-26 16:49:24 +01003257 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003258}
3259
bellardaab33092005-10-30 20:48:42 +00003260/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003261uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
3262 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003263{
3264 uint8_t val;
Peter Maydell50013112015-04-26 16:49:24 +01003265 MemTxResult r;
3266
3267 r = address_space_rw(as, addr, attrs, &val, 1, 0);
3268 if (result) {
3269 *result = r;
3270 }
bellardaab33092005-10-30 20:48:42 +00003271 return val;
3272}
3273
Peter Maydell50013112015-04-26 16:49:24 +01003274uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
3275{
3276 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3277}
3278
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003279/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003280static inline uint32_t address_space_lduw_internal(AddressSpace *as,
3281 hwaddr addr,
3282 MemTxAttrs attrs,
3283 MemTxResult *result,
3284 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003285{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003286 uint8_t *ptr;
3287 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003288 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003289 hwaddr l = 2;
3290 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003291 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003292 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003293
Paolo Bonzini41063e12015-03-18 14:21:43 +01003294 rcu_read_lock();
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003295 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003296 false);
3297 if (l < 2 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003298 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003299
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003300 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003301 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003302#if defined(TARGET_WORDS_BIGENDIAN)
3303 if (endian == DEVICE_LITTLE_ENDIAN) {
3304 val = bswap16(val);
3305 }
3306#else
3307 if (endian == DEVICE_BIG_ENDIAN) {
3308 val = bswap16(val);
3309 }
3310#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003311 } else {
3312 /* RAM case */
Gonglei3655cb92016-02-20 10:35:20 +08003313 ptr = qemu_get_ram_ptr(mr->ram_block,
3314 (memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003315 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003316 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003317 switch (endian) {
3318 case DEVICE_LITTLE_ENDIAN:
3319 val = lduw_le_p(ptr);
3320 break;
3321 case DEVICE_BIG_ENDIAN:
3322 val = lduw_be_p(ptr);
3323 break;
3324 default:
3325 val = lduw_p(ptr);
3326 break;
3327 }
Peter Maydell50013112015-04-26 16:49:24 +01003328 r = MEMTX_OK;
3329 }
3330 if (result) {
3331 *result = r;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003332 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003333 if (release_lock) {
3334 qemu_mutex_unlock_iothread();
3335 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003336 rcu_read_unlock();
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003337 return val;
bellardaab33092005-10-30 20:48:42 +00003338}
3339
Peter Maydell50013112015-04-26 16:49:24 +01003340uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
3341 MemTxAttrs attrs, MemTxResult *result)
3342{
3343 return address_space_lduw_internal(as, addr, attrs, result,
3344 DEVICE_NATIVE_ENDIAN);
3345}
3346
3347uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
3348 MemTxAttrs attrs, MemTxResult *result)
3349{
3350 return address_space_lduw_internal(as, addr, attrs, result,
3351 DEVICE_LITTLE_ENDIAN);
3352}
3353
3354uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
3355 MemTxAttrs attrs, MemTxResult *result)
3356{
3357 return address_space_lduw_internal(as, addr, attrs, result,
3358 DEVICE_BIG_ENDIAN);
3359}
3360
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003361uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003362{
Peter Maydell50013112015-04-26 16:49:24 +01003363 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003364}
3365
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003366uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003367{
Peter Maydell50013112015-04-26 16:49:24 +01003368 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003369}
3370
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003371uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003372{
Peter Maydell50013112015-04-26 16:49:24 +01003373 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003374}
3375
bellard8df1cd02005-01-28 22:37:22 +00003376/* warning: addr must be aligned. The RAM page is not marked dirty
 3377 and the translated code within it is not invalidated. This is useful when
 3378 the dirty bits are used to track modified PTEs. */
Peter Maydell50013112015-04-26 16:49:24 +01003379void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3380 MemTxAttrs attrs, MemTxResult *result)
bellard8df1cd02005-01-28 22:37:22 +00003381{
bellard8df1cd02005-01-28 22:37:22 +00003382 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003383 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003384 hwaddr l = 4;
3385 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003386 MemTxResult r;
Paolo Bonzini845b6212015-03-23 11:45:53 +01003387 uint8_t dirty_log_mask;
Jan Kiszka4840f102015-06-18 18:47:22 +02003388 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003389
Paolo Bonzini41063e12015-03-18 14:21:43 +01003390 rcu_read_lock();
Edgar E. Iglesias2198a122013-11-28 10:13:41 +01003391 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003392 true);
3393 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003394 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003395
Peter Maydell50013112015-04-26 16:49:24 +01003396 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003397 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003398 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
Gonglei3655cb92016-02-20 10:35:20 +08003399 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
bellard8df1cd02005-01-28 22:37:22 +00003400 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003401
Paolo Bonzini845b6212015-03-23 11:45:53 +01003402 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3403 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
Paolo Bonzini58d27072015-03-23 11:56:01 +01003404 cpu_physical_memory_set_dirty_range(addr1, 4, dirty_log_mask);
Peter Maydell50013112015-04-26 16:49:24 +01003405 r = MEMTX_OK;
3406 }
3407 if (result) {
3408 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003409 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003410 if (release_lock) {
3411 qemu_mutex_unlock_iothread();
3412 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003413 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003414}
3415
Peter Maydell50013112015-04-26 16:49:24 +01003416void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3417{
3418 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3419}
3420
bellard8df1cd02005-01-28 22:37:22 +00003421/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003422static inline void address_space_stl_internal(AddressSpace *as,
3423 hwaddr addr, uint32_t val,
3424 MemTxAttrs attrs,
3425 MemTxResult *result,
3426 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003427{
bellard8df1cd02005-01-28 22:37:22 +00003428 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003429 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003430 hwaddr l = 4;
3431 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003432 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003433 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003434
Paolo Bonzini41063e12015-03-18 14:21:43 +01003435 rcu_read_lock();
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003436 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003437 true);
3438 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003439 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003440
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003441#if defined(TARGET_WORDS_BIGENDIAN)
3442 if (endian == DEVICE_LITTLE_ENDIAN) {
3443 val = bswap32(val);
3444 }
3445#else
3446 if (endian == DEVICE_BIG_ENDIAN) {
3447 val = bswap32(val);
3448 }
3449#endif
Peter Maydell50013112015-04-26 16:49:24 +01003450 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003451 } else {
bellard8df1cd02005-01-28 22:37:22 +00003452 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003453 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
Gonglei3655cb92016-02-20 10:35:20 +08003454 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003455 switch (endian) {
3456 case DEVICE_LITTLE_ENDIAN:
3457 stl_le_p(ptr, val);
3458 break;
3459 case DEVICE_BIG_ENDIAN:
3460 stl_be_p(ptr, val);
3461 break;
3462 default:
3463 stl_p(ptr, val);
3464 break;
3465 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003466 invalidate_and_set_dirty(mr, addr1, 4);
Peter Maydell50013112015-04-26 16:49:24 +01003467 r = MEMTX_OK;
bellard8df1cd02005-01-28 22:37:22 +00003468 }
Peter Maydell50013112015-04-26 16:49:24 +01003469 if (result) {
3470 *result = r;
3471 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003472 if (release_lock) {
3473 qemu_mutex_unlock_iothread();
3474 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003475 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003476}
3477
3478void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3479 MemTxAttrs attrs, MemTxResult *result)
3480{
3481 address_space_stl_internal(as, addr, val, attrs, result,
3482 DEVICE_NATIVE_ENDIAN);
3483}
3484
3485void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3486 MemTxAttrs attrs, MemTxResult *result)
3487{
3488 address_space_stl_internal(as, addr, val, attrs, result,
3489 DEVICE_LITTLE_ENDIAN);
3490}
3491
3492void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3493 MemTxAttrs attrs, MemTxResult *result)
3494{
3495 address_space_stl_internal(as, addr, val, attrs, result,
3496 DEVICE_BIG_ENDIAN);
bellard8df1cd02005-01-28 22:37:22 +00003497}
3498
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003499void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003500{
Peter Maydell50013112015-04-26 16:49:24 +01003501 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003502}
3503
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003504void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003505{
Peter Maydell50013112015-04-26 16:49:24 +01003506 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003507}
3508
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003509void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003510{
Peter Maydell50013112015-04-26 16:49:24 +01003511 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003512}
3513
bellardaab33092005-10-30 20:48:42 +00003514/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003515void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3516 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003517{
3518 uint8_t v = val;
Peter Maydell50013112015-04-26 16:49:24 +01003519 MemTxResult r;
3520
3521 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3522 if (result) {
3523 *result = r;
3524 }
3525}
3526
3527void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3528{
3529 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003530}
3531
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003532/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003533static inline void address_space_stw_internal(AddressSpace *as,
3534 hwaddr addr, uint32_t val,
3535 MemTxAttrs attrs,
3536 MemTxResult *result,
3537 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003538{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003539 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003540 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003541 hwaddr l = 2;
3542 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003543 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003544 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003545
Paolo Bonzini41063e12015-03-18 14:21:43 +01003546 rcu_read_lock();
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003547 mr = address_space_translate(as, addr, &addr1, &l, true);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003548 if (l < 2 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003549 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003550
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003551#if defined(TARGET_WORDS_BIGENDIAN)
3552 if (endian == DEVICE_LITTLE_ENDIAN) {
3553 val = bswap16(val);
3554 }
3555#else
3556 if (endian == DEVICE_BIG_ENDIAN) {
3557 val = bswap16(val);
3558 }
3559#endif
Peter Maydell50013112015-04-26 16:49:24 +01003560 r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003561 } else {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003562 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003563 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
Gonglei3655cb92016-02-20 10:35:20 +08003564 ptr = qemu_get_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003565 switch (endian) {
3566 case DEVICE_LITTLE_ENDIAN:
3567 stw_le_p(ptr, val);
3568 break;
3569 case DEVICE_BIG_ENDIAN:
3570 stw_be_p(ptr, val);
3571 break;
3572 default:
3573 stw_p(ptr, val);
3574 break;
3575 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003576 invalidate_and_set_dirty(mr, addr1, 2);
Peter Maydell50013112015-04-26 16:49:24 +01003577 r = MEMTX_OK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003578 }
Peter Maydell50013112015-04-26 16:49:24 +01003579 if (result) {
3580 *result = r;
3581 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003582 if (release_lock) {
3583 qemu_mutex_unlock_iothread();
3584 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003585 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003586}
3587
3588void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
3589 MemTxAttrs attrs, MemTxResult *result)
3590{
3591 address_space_stw_internal(as, addr, val, attrs, result,
3592 DEVICE_NATIVE_ENDIAN);
3593}
3594
3595void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
3596 MemTxAttrs attrs, MemTxResult *result)
3597{
3598 address_space_stw_internal(as, addr, val, attrs, result,
3599 DEVICE_LITTLE_ENDIAN);
3600}
3601
3602void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
3603 MemTxAttrs attrs, MemTxResult *result)
3604{
3605 address_space_stw_internal(as, addr, val, attrs, result,
3606 DEVICE_BIG_ENDIAN);
bellardaab33092005-10-30 20:48:42 +00003607}
3608
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003609void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003610{
Peter Maydell50013112015-04-26 16:49:24 +01003611 address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003612}
3613
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003614void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003615{
Peter Maydell50013112015-04-26 16:49:24 +01003616 address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003617}
3618
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003619void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003620{
Peter Maydell50013112015-04-26 16:49:24 +01003621 address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003622}
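
/*
 * Usage sketch (illustrative, not part of this file): pick the variant
 * whose suffix matches the device's register endianness instead of
 * byte-swapping by hand, and keep the address 2-byte aligned as the
 * warning above address_space_stw_internal() requires.  The helper name
 * and register offset below are assumptions.
 *
 *   static void demo_set_irq_mask(AddressSpace *as, hwaddr regs_base,
 *                                 uint16_t mask)
 *   {
 *       // Hypothetical little-endian device, 16-bit register at +0x06.
 *       stw_le_phys(as, regs_base + 0x06, mask);
 *   }
 */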
3623
bellardaab33092005-10-30 20:48:42 +00003624/* XXX: optimize */
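/*
 * Unlike the 16- and 32-bit helpers above, the 64-bit stores below do not
 * use memory_region_dispatch_write(): the value is byte-swapped up front
 * (tswap64() for target-native order, cpu_to_le64()/cpu_to_be64() for the
 * explicit variants) and then written as a plain 8-byte buffer through
 * address_space_rw(), hence the XXX.
 */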
Peter Maydell50013112015-04-26 16:49:24 +01003625void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
3626 MemTxAttrs attrs, MemTxResult *result)
3627{
3628 MemTxResult r;
3629 val = tswap64(val);
3630 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3631 if (result) {
3632 *result = r;
3633 }
3634}
3635
3636void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
3637 MemTxAttrs attrs, MemTxResult *result)
3638{
3639 MemTxResult r;
3640 val = cpu_to_le64(val);
3641 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3642 if (result) {
3643 *result = r;
3644 }
3645}

 3646void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
3647 MemTxAttrs attrs, MemTxResult *result)
3648{
3649 MemTxResult r;
3650 val = cpu_to_be64(val);
3651 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3652 if (result) {
3653 *result = r;
3654 }
3655}
3656
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003657void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00003658{
Peter Maydell50013112015-04-26 16:49:24 +01003659 address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003660}
3661
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003662void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003663{
Peter Maydell50013112015-04-26 16:49:24 +01003664 address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003665}
3666
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003667void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003668{
Peter Maydell50013112015-04-26 16:49:24 +01003669 address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003670}
3671
aliguori5e2972f2009-03-28 17:51:36 +00003672/* virtual memory access for debug (includes writing to ROM) */
Andreas Färberf17ec442013-06-29 19:40:58 +02003673int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00003674 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003675{
3676 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02003677 hwaddr phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00003678 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00003679
3680 while (len > 0) {
Peter Maydell5232e4c2016-01-21 14:15:06 +00003681 int asidx;
3682 MemTxAttrs attrs;
3683
bellard13eb76e2004-01-24 15:23:36 +00003684 page = addr & TARGET_PAGE_MASK;
Peter Maydell5232e4c2016-01-21 14:15:06 +00003685 phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
3686 asidx = cpu_asidx_from_attrs(cpu, attrs);
bellard13eb76e2004-01-24 15:23:36 +00003687 /* if no physical page mapped, return an error */
3688 if (phys_addr == -1)
3689 return -1;
3690 l = (page + TARGET_PAGE_SIZE) - addr;
3691 if (l > len)
3692 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00003693 phys_addr += (addr & ~TARGET_PAGE_MASK);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003694 if (is_write) {
Peter Maydell5232e4c2016-01-21 14:15:06 +00003695 cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
3696 phys_addr, buf, l);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003697 } else {
Peter Maydell5232e4c2016-01-21 14:15:06 +00003698 address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
3699 MEMTXATTRS_UNSPECIFIED,
Peter Maydell5c9eb022015-04-26 16:49:24 +01003700 buf, l, 0);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003701 }
bellard13eb76e2004-01-24 15:23:36 +00003702 len -= l;
3703 buf += l;
3704 addr += l;
3705 }
3706 return 0;
3707}
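
/*
 * Usage sketch (illustrative, not part of this file): this is the entry
 * point debug front ends such as the gdbstub use, since it walks the
 * CPU's current page tables and may write through ROM.  The helper name
 * and struct-copy use case are assumptions.
 *
 *   static bool demo_read_guest_struct(CPUState *cpu, target_ulong vaddr,
 *                                      uint8_t *dest, int size)
 *   {
 *       // cpu_memory_rw_debug() returns non-zero if a page is unmapped.
 *       return cpu_memory_rw_debug(cpu, vaddr, dest, size, 0) == 0;
 *   }
 */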
Dr. David Alan Gilbert038629a2015-11-05 18:10:29 +00003708
3709/*
3710 * Allows code that needs to deal with migration bitmaps etc to still be built
3711 * target independent.
3712 */
3713size_t qemu_target_page_bits(void)
3714{
3715 return TARGET_PAGE_BITS;
3716}
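
/*
 * Usage sketch (illustrative, not part of this file): target-independent
 * code, e.g. the migration bitmap handling mentioned above, can derive
 * the page size and page count without seeing TARGET_PAGE_SIZE.  The
 * variable names below are assumptions.
 *
 *   size_t page_size = (size_t)1 << qemu_target_page_bits();
 *   size_t page_mask = page_size - 1;
 *   size_t nr_pages  = (ram_bytes + page_mask) >> qemu_target_page_bits();
 */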
3717
Paul Brooka68fe892010-03-01 00:08:59 +00003718#endif
bellard13eb76e2004-01-24 15:23:36 +00003719
Blue Swirl8e4a4242013-01-06 18:30:17 +00003720/*
3721 * A helper function for the _utterly broken_ virtio device model to find out if
3722 * it's running on a big endian machine. Don't do this at home kids!
3723 */
Greg Kurz98ed8ec2014-06-24 19:26:29 +02003724bool target_words_bigendian(void);
3725bool target_words_bigendian(void)
Blue Swirl8e4a4242013-01-06 18:30:17 +00003726{
3727#if defined(TARGET_WORDS_BIGENDIAN)
3728 return true;
3729#else
3730 return false;
3731#endif
3732}
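
/*
 * Usage sketch (illustrative, not part of this file): a legacy virtio
 * device follows the guest's byte order, so device code can pick its
 * conversion from this predicate.  The helper name is an assumption.
 *
 *   static uint16_t demo_virtio_tswap16(uint16_t v)
 *   {
 *       return target_words_bigendian() ? cpu_to_be16(v) : cpu_to_le16(v);
 *   }
 */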
3733
Wen Congyang76f35532012-05-07 12:04:18 +08003734#ifndef CONFIG_USER_ONLY
Avi Kivitya8170e52012-10-23 12:30:10 +02003735bool cpu_physical_memory_is_io(hwaddr phys_addr)
Wen Congyang76f35532012-05-07 12:04:18 +08003736{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003737 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003738 hwaddr l = 1;
Paolo Bonzini41063e12015-03-18 14:21:43 +01003739 bool res;
Wen Congyang76f35532012-05-07 12:04:18 +08003740
Paolo Bonzini41063e12015-03-18 14:21:43 +01003741 rcu_read_lock();
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003742 mr = address_space_translate(&address_space_memory,
3743 phys_addr, &phys_addr, &l, false);
Wen Congyang76f35532012-05-07 12:04:18 +08003744
Paolo Bonzini41063e12015-03-18 14:21:43 +01003745 res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
3746 rcu_read_unlock();
3747 return res;
Wen Congyang76f35532012-05-07 12:04:18 +08003748}
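
/*
 * Usage sketch (illustrative, not part of this file): code that dumps or
 * scans guest memory can use this predicate to avoid touching device
 * (MMIO) regions.  The helper name and zero-fill policy are assumptions.
 *
 *   static void demo_dump_page(hwaddr addr, uint8_t *buf)
 *   {
 *       if (cpu_physical_memory_is_io(addr)) {
 *           memset(buf, 0, TARGET_PAGE_SIZE);   // skip device-backed pages
 *           return;
 *       }
 *       cpu_physical_memory_read(addr, buf, TARGET_PAGE_SIZE);
 *   }
 */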
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003749
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003750int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003751{
3752 RAMBlock *block;
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003753 int ret = 0;
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003754
Mike Day0dc3f442013-09-05 14:41:35 -04003755 rcu_read_lock();
3756 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003757 ret = func(block->idstr, block->host, block->offset,
3758 block->used_length, opaque);
3759 if (ret) {
3760 break;
3761 }
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003762 }
Mike Day0dc3f442013-09-05 14:41:35 -04003763 rcu_read_unlock();
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003764 return ret;
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003765}
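
/*
 * Usage sketch (illustrative, not part of this file): the iterator hides
 * RAMBlock and the RCU-protected block list from its callers.  A
 * hypothetical walker that totals the RAM currently in use:
 *
 *   static int demo_count_ram(const char *block_name, void *host_addr,
 *                             ram_addr_t offset, ram_addr_t length,
 *                             void *opaque)
 *   {
 *       *(uint64_t *)opaque += length;
 *       return 0;               // non-zero would stop the iteration
 *   }
 *
 *   uint64_t total = 0;
 *   qemu_ram_foreach_block(demo_count_ram, &total);
 */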
Peter Maydellec3f8c92013-06-27 20:53:38 +01003766#endif