/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#ifndef _WIN32
#endif

#include "qemu/cutils.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "hw/qdev-core.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#include "hw/xen/xen.h"
#endif
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "hw/hw.h"
#include "exec/memory.h"
#include "exec/ioport.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "exec/log.h"

#include "migration/vmstate.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits to skip to the next level (in units of L2_SIZE).
     * 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
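
/* Worked example (illustrative only; TARGET_PAGE_BITS is target-dependent):
 * with TARGET_PAGE_BITS == 12, P_L2_LEVELS == ((64 - 12 - 1) / 9) + 1 == 6,
 * i.e. the radix tree below has six levels, each level consuming
 * P_L2_BITS == 9 bits of the page number.
 */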

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;
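
/* Note (descriptive, see subpage_register()/register_subpage() below):
 * sub_section[] holds one section index per byte offset within the page,
 * so a single target page can be split between several MemoryRegions at
 * byte granularity.
 */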

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    static unsigned alloc_hint = 16;
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, alloc_hint);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
        alloc_hint = map->nodes_nb_alloc;
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
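
/* Illustrative call (hypothetical values): registering a 2MB region at
 * physical address 0x100000 with 4KB pages would be
 *
 *     phys_page_set(d, 0x100000 >> 12, 0x200000 >> 12, section_index);
 *
 * i.e. index == 0x100 and nb == 0x200 pages, all becoming leaves that
 * point at the same MemoryRegionSection (compare register_multipage()
 * below).
 */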

/* Compact a non-leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}
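
/* Illustrative effect: a chain of single-child interior nodes, each with
 * skip == 1, collapses so that the topmost entry points directly at the
 * bottom of the chain with the skip values summed; phys_page_find() can
 * then hop over the intermediate levels in one step.
 */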

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return section->size.hi ||
           range_covers_byte(section->offset_within_address_space,
                             section->size.lo, addr);
}
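
/* Example: a section whose Int128 size is exactly 2^64 has size.hi == 1,
 * so the size.hi test above short-circuits the byte-range check; such a
 * section by construction covers every addr.
 */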

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = atomic_read(&d->mru_section);
    subpage_t *subpage;
    bool update;

    if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
        section_covers_addr(section, addr)) {
        update = false;
    } else {
        section = phys_page_find(d->phys_map, addr, d->map.nodes,
                                 d->map.sections);
        update = true;
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    if (update) {
        atomic_set(&d->mru_section, section);
    }
    return section;
}
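
/* Note (descriptive): the mru_section test above acts as a one-entry cache;
 * back-to-back lookups that land in the same section, the common case for a
 * CPU touching the same RAM block repeatedly, skip the phys_page_find()
 * radix-tree walk entirely.
 */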

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}
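
/* Typical caller pattern (illustrative sketch, matching the RCU requirement
 * noted above):
 *
 *     rcu_read_lock();
 *     mr = address_space_translate(as, addr, &xlat, &plen, is_write);
 *     ... access mr at offset xlat, for at most plen bytes ...
 *     rcu_read_unlock();
 */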

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
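
/* Illustrative target-code usage (hypothetical, single address space):
 *
 *     cpu->num_ases = 1;
 *     cpu_address_space_init(cpu, as, 0);
 *
 * after which cpu_get_address_space(cpu, 0) returns "as" and cpu->as holds
 * the convenience alias set up above.
 */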
#endif

static int cpu_get_free_index(void)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

void cpu_exec_exit(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cpu_list_lock();
    if (cpu->node.tqe_prev == NULL) {
        /* there is nothing to undo since cpu_exec_init() hasn't been called */
        cpu_list_unlock();
        return;
    }

    QTAILQ_REMOVE(&cpus, cpu, node);
    cpu->node.tqe_prev = NULL;
    cpu->cpu_index = UNASSIGNED_CPU_INDEX;
    cpu_list_unlock();

    if (cc->vmsd != NULL) {
        vmstate_unregister(NULL, cc->vmsd, cpu);
    }
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
    }
}

void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc ATTRIBUTE_UNUSED = CPU_GET_CLASS(cpu);
    Error *local_err ATTRIBUTE_UNUSED = NULL;

    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();

    /* This is a softmmu CPU object, so create a property for it
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
                             (Object **)&cpu->memory,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));
#endif

    cpu_list_lock();
    if (cpu->cpu_index == UNASSIGNED_CPU_INDEX) {
        cpu->cpu_index = cpu_get_free_index();
        assert(cpu->cpu_index != UNASSIGNED_CPU_INDEX);
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
    cpu_list_unlock();

#ifndef CONFIG_USER_ONLY
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
    }
#endif
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    MemTxAttrs attrs;
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}
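
/* Illustrative use (hypothetical values): watch 4 bytes at guest virtual
 * address 0x1000 for writes, as the gdbstub would:
 *
 *     CPUWatchpoint *wp;
 *     cpu_watchpoint_insert(cpu, 0x1000, 4, BP_MEM_WRITE | BP_GDB, &wp);
 */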

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
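
/* Worked example (illustrative): a watchpoint with vaddr == 0xfff0 and
 * len == 0x20 gives wpend == 0x1000f; an access at addr == 0x1000 with
 * len == 4 gives addrend == 0x1003.  Neither addr > wpend nor
 * wp->vaddr > addrend holds, so the ranges overlap and this returns true.
 */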

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens (two threads; this reader on the left, the
     * updater removing the block on the right):
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                xxx removed from list
     *     rcu_read_lock()
     *     read mru_block
     *                                mru_block = NULL;
     *                                call_rcu(reclaim_ramblock, xxx);
     *     rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    bool dirty = false;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
                                              offset, num);
        page += num;
    }

    rcu_read_unlock();

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}
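
/* Note (descriptive): the loop above consumes the dirty bitmap one
 * DIRTY_MEMORY_BLOCK_SIZE chunk at a time; a range that straddles a block
 * boundary simply becomes two bitmap_test_and_clear_atomic() calls, with
 * num clamped to what remains of the current block.
 */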

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
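
/* Summary (descriptive, from the code above): for RAM the returned iotlb is
 * the ram_addr_t of the page with PHYS_SECTION_NOTDIRTY or PHYS_SECTION_ROM
 * OR'ed into the low bits; for MMIO it is the section's index in
 * d->map.sections plus the offset within the section; pages with a matching
 * watchpoint are redirected to PHYS_SECTION_WATCH instead.
 */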
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
    qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}
1084
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001085static uint16_t phys_section_add(PhysPageMap *map,
1086 MemoryRegionSection *section)
Avi Kivity5312bd82012-02-12 18:32:55 +02001087{
Paolo Bonzini68f3f652013-05-07 11:30:23 +02001088 /* The physical section number is ORed with a page-aligned
1089 * pointer to produce the iotlb entries. Thus it should
1090 * never overflow into the page-aligned value.
1091 */
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001092 assert(map->sections_nb < TARGET_PAGE_SIZE);
Paolo Bonzini68f3f652013-05-07 11:30:23 +02001093
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001094 if (map->sections_nb == map->sections_nb_alloc) {
1095 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
1096 map->sections = g_renew(MemoryRegionSection, map->sections,
1097 map->sections_nb_alloc);
Avi Kivity5312bd82012-02-12 18:32:55 +02001098 }
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001099 map->sections[map->sections_nb] = *section;
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02001100 memory_region_ref(section->mr);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001101 return map->sections_nb++;
Avi Kivity5312bd82012-02-12 18:32:55 +02001102}
1103
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001104static void phys_section_destroy(MemoryRegion *mr)
1105{
Don Slutz55b4e802015-11-30 17:11:04 -05001106 bool have_sub_page = mr->subpage;
1107
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02001108 memory_region_unref(mr);
1109
Don Slutz55b4e802015-11-30 17:11:04 -05001110 if (have_sub_page) {
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001111 subpage_t *subpage = container_of(mr, subpage_t, iomem);
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001112 object_unref(OBJECT(&subpage->iomem));
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001113 g_free(subpage);
1114 }
1115}
1116
Paolo Bonzini60926662013-05-29 12:30:26 +02001117static void phys_sections_free(PhysPageMap *map)
Avi Kivity5312bd82012-02-12 18:32:55 +02001118{
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001119 while (map->sections_nb > 0) {
1120 MemoryRegionSection *section = &map->sections[--map->sections_nb];
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001121 phys_section_destroy(section->mr);
1122 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001123 g_free(map->sections);
1124 g_free(map->nodes);
Avi Kivity5312bd82012-02-12 18:32:55 +02001125}
1126
Avi Kivityac1970f2012-10-03 16:22:53 +02001127static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001128{
1129 subpage_t *subpage;
Avi Kivitya8170e52012-10-23 12:30:10 +02001130 hwaddr base = section->offset_within_address_space
Avi Kivity0f0cb162012-02-13 17:14:32 +02001131 & TARGET_PAGE_MASK;
Michael S. Tsirkin97115a82013-11-13 20:08:19 +02001132 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001133 d->map.nodes, d->map.sections);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001134 MemoryRegionSection subsection = {
1135 .offset_within_address_space = base,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001136 .size = int128_make64(TARGET_PAGE_SIZE),
Avi Kivity0f0cb162012-02-13 17:14:32 +02001137 };
Avi Kivitya8170e52012-10-23 12:30:10 +02001138 hwaddr start, end;
Avi Kivity0f0cb162012-02-13 17:14:32 +02001139
Avi Kivityf3705d52012-03-08 16:16:34 +02001140 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001141
Avi Kivityf3705d52012-03-08 16:16:34 +02001142 if (!(existing->mr->subpage)) {
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001143 subpage = subpage_init(d->as, base);
Edgar E. Iglesias3be91e82013-11-07 18:42:51 +01001144 subsection.address_space = d->as;
Avi Kivity0f0cb162012-02-13 17:14:32 +02001145 subsection.mr = &subpage->iomem;
Avi Kivityac1970f2012-10-03 16:22:53 +02001146 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001147 phys_section_add(&d->map, &subsection));
Avi Kivity0f0cb162012-02-13 17:14:32 +02001148 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02001149 subpage = container_of(existing->mr, subpage_t, iomem);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001150 }
1151 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001152 end = start + int128_get64(section->size) - 1;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001153 subpage_register(subpage, start, end,
1154 phys_section_add(&d->map, section));
Avi Kivity0f0cb162012-02-13 17:14:32 +02001155}
1156
1157
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001158static void register_multipage(AddressSpaceDispatch *d,
1159 MemoryRegionSection *section)
bellard33417e72003-08-10 21:47:01 +00001160{
Avi Kivitya8170e52012-10-23 12:30:10 +02001161 hwaddr start_addr = section->offset_within_address_space;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001162 uint16_t section_index = phys_section_add(&d->map, section);
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001163 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1164 TARGET_PAGE_BITS));
Avi Kivitydd811242012-01-02 12:17:03 +02001165
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001166 assert(num_pages);
1167 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
bellard33417e72003-08-10 21:47:01 +00001168}
1169
Avi Kivityac1970f2012-10-03 16:22:53 +02001170static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001171{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001172 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini00752702013-05-29 12:13:54 +02001173 AddressSpaceDispatch *d = as->next_dispatch;
Paolo Bonzini99b9cc02013-05-27 13:18:01 +02001174 MemoryRegionSection now = *section, remain = *section;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001175 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001176
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001177 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1178 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1179 - now.offset_within_address_space;
1180
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001181 now.size = int128_min(int128_make64(left), now.size);
Avi Kivityac1970f2012-10-03 16:22:53 +02001182 register_subpage(d, &now);
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001183 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001184 now.size = int128_zero();
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001185 }
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001186 while (int128_ne(remain.size, now.size)) {
1187 remain.size = int128_sub(remain.size, now.size);
1188 remain.offset_within_address_space += int128_get64(now.size);
1189 remain.offset_within_region += int128_get64(now.size);
Tyler Hall69b67642012-07-25 18:45:04 -04001190 now = remain;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001191 if (int128_lt(remain.size, page_size)) {
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001192 register_subpage(d, &now);
Hu Tao88266242013-08-29 18:21:16 +08001193 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001194 now.size = page_size;
Avi Kivityac1970f2012-10-03 16:22:53 +02001195 register_subpage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001196 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001197 now.size = int128_and(now.size, int128_neg(page_size));
Avi Kivityac1970f2012-10-03 16:22:53 +02001198 register_multipage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001199 }
Avi Kivity0f0cb162012-02-13 17:14:32 +02001200 }
1201}
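
/* Worked example (illustrative, 4 KiB pages): a section spanning
 * [0x500, 0x2800) is split by the loop above into a head subpage for
 * [0x500, 0x1000) (unaligned start), one full page [0x1000, 0x2000)
 * registered via register_multipage(), and a tail subpage for
 * [0x2000, 0x2800) (partial last page).
 */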
1202
Sheng Yang62a27442010-01-26 19:21:16 +08001203void qemu_flush_coalesced_mmio_buffer(void)
1204{
1205 if (kvm_enabled())
1206 kvm_flush_coalesced_mmio_buffer();
1207}
1208
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001209void qemu_mutex_lock_ramlist(void)
1210{
1211 qemu_mutex_lock(&ram_list.mutex);
1212}
1213
1214void qemu_mutex_unlock_ramlist(void)
1215{
1216 qemu_mutex_unlock(&ram_list.mutex);
1217}
1218
Markus Armbrustere1e84ba2013-07-31 15:11:10 +02001219#ifdef __linux__
Alex Williamson04b16652010-07-02 11:13:17 -06001220static void *file_ram_alloc(RAMBlock *block,
1221 ram_addr_t memory,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001222 const char *path,
1223 Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001224{
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001225 bool unlink_on_error = false;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001226 char *filename;
Peter Feiner8ca761f2013-03-04 13:54:25 -05001227 char *sanitized_name;
1228 char *c;
Igor Mammedov056b68a2016-07-20 11:54:03 +02001229 void *area = MAP_FAILED;
Paolo Bonzini5c3ece72016-03-17 15:53:13 +01001230 int fd = -1;
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001231 int64_t page_size;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001232
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001233 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1234 error_setg(errp,
1235 "host lacks kvm mmu notifiers, -mem-path unsupported");
1236 return NULL;
1237 }
1238
1239 for (;;) {
1240 fd = open(path, O_RDWR);
1241 if (fd >= 0) {
1242 /* @path names an existing file, use it */
1243 break;
1244 }
1245 if (errno == ENOENT) {
1246 /* @path names a file that doesn't exist, create it */
1247 fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
1248 if (fd >= 0) {
1249 unlink_on_error = true;
1250 break;
1251 }
1252 } else if (errno == EISDIR) {
1253 /* @path names a directory, create a file there */
1254 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1255 sanitized_name = g_strdup(memory_region_name(block->mr));
1256 for (c = sanitized_name; *c != '\0'; c++) {
1257 if (*c == '/') {
1258 *c = '_';
1259 }
1260 }
1261
1262 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1263 sanitized_name);
1264 g_free(sanitized_name);
1265
1266 fd = mkstemp(filename);
1267 if (fd >= 0) {
1268 unlink(filename);
1269 g_free(filename);
1270 break;
1271 }
1272 g_free(filename);
1273 }
1274 if (errno != EEXIST && errno != EINTR) {
1275 error_setg_errno(errp, errno,
1276 "can't open backing store %s for guest RAM",
1277 path);
1278 goto error;
1279 }
1280 /*
1281 * Try again on EINTR and EEXIST. The latter happens when
1282 * something else creates the file between our two open().
1283 */
1284 }
1285
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001286 page_size = qemu_fd_getpagesize(fd);
Dominik Dingeld2f39ad2016-04-25 13:55:38 +02001287 block->mr->align = MAX(page_size, QEMU_VMALLOC_ALIGN);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001288
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001289 if (memory < page_size) {
Hu Tao557529d2014-09-09 13:28:00 +08001290 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001291 "or larger than page size 0x%" PRIx64,
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001292 memory, page_size);
Hu Tao557529d2014-09-09 13:28:00 +08001293 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001294 }
1295
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001296 memory = ROUND_UP(memory, page_size);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001297
1298 /*
 1299 * ftruncate is not supported by hugetlbfs on older
 1300 * hosts, so don't bother bailing out on errors.
 1301 * If anything goes wrong with it under other filesystems,
 1302 * the subsequent mmap will fail.
1303 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001304 if (ftruncate(fd, memory)) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001305 perror("ftruncate");
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001306 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001307
Dominik Dingeld2f39ad2016-04-25 13:55:38 +02001308 area = qemu_ram_mmap(fd, memory, block->mr->align,
1309 block->flags & RAM_SHARED);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001310 if (area == MAP_FAILED) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001311 error_setg_errno(errp, errno,
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001312 "unable to map backing store for guest RAM");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001313 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001314 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001315
1316 if (mem_prealloc) {
Igor Mammedov056b68a2016-07-20 11:54:03 +02001317 os_mem_prealloc(fd, area, memory, errp);
1318 if (errp && *errp) {
1319 goto error;
1320 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001321 }
1322
Alex Williamson04b16652010-07-02 11:13:17 -06001323 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001324 return area;
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001325
1326error:
Igor Mammedov056b68a2016-07-20 11:54:03 +02001327 if (area != MAP_FAILED) {
1328 qemu_ram_munmap(area, memory);
1329 }
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001330 if (unlink_on_error) {
1331 unlink(path);
1332 }
Paolo Bonzini5c3ece72016-03-17 15:53:13 +01001333 if (fd != -1) {
1334 close(fd);
1335 }
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001336 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001337}
1338#endif
1339
Mike Day0dc3f442013-09-05 14:41:35 -04001340/* Called with the ramlist lock held. */
Alex Williamsond17b5282010-06-25 11:08:38 -06001341static ram_addr_t find_ram_offset(ram_addr_t size)
1342{
Alex Williamson04b16652010-07-02 11:13:17 -06001343 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001344 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001345
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001346 assert(size != 0); /* it would hand out the same offset multiple times */
1347
Mike Day0dc3f442013-09-05 14:41:35 -04001348 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
Alex Williamson04b16652010-07-02 11:13:17 -06001349 return 0;
Mike Day0d53d9f2015-01-21 13:45:24 +01001350 }
Alex Williamson04b16652010-07-02 11:13:17 -06001351
Mike Day0dc3f442013-09-05 14:41:35 -04001352 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001353 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001354
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001355 end = block->offset + block->max_length;
Alex Williamson04b16652010-07-02 11:13:17 -06001356
Mike Day0dc3f442013-09-05 14:41:35 -04001357 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001358 if (next_block->offset >= end) {
1359 next = MIN(next, next_block->offset);
1360 }
1361 }
1362 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001363 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001364 mingap = next - end;
1365 }
1366 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001367
1368 if (offset == RAM_ADDR_MAX) {
1369 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1370 (uint64_t)size);
1371 abort();
1372 }
1373
Alex Williamson04b16652010-07-02 11:13:17 -06001374 return offset;
1375}
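
/* Worked example (illustrative): with block A at [0x0, 0x8000) and
 * block B at [0x10000, 0x14000), a request for 0x4000 bytes measures
 * the 0x8000-wide gap after A and the unbounded gap after B, and
 * returns 0x8000, the start of the smallest gap that still fits.
 */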
1376
Juan Quintela652d7ec2012-07-20 10:37:54 +02001377ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001378{
Alex Williamsond17b5282010-06-25 11:08:38 -06001379 RAMBlock *block;
1380 ram_addr_t last = 0;
1381
Mike Day0dc3f442013-09-05 14:41:35 -04001382 rcu_read_lock();
1383 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001384 last = MAX(last, block->offset + block->max_length);
Mike Day0d53d9f2015-01-21 13:45:24 +01001385 }
Mike Day0dc3f442013-09-05 14:41:35 -04001386 rcu_read_unlock();
Alex Williamsond17b5282010-06-25 11:08:38 -06001387 return last;
1388}
1389
Jason Baronddb97f12012-08-02 15:44:16 -04001390static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1391{
1392 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001393
1394 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
Marcel Apfelbaum47c8ca52015-02-04 17:43:54 +02001395 if (!machine_dump_guest_core(current_machine)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001396 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1397 if (ret) {
1398 perror("qemu_madvise");
1399 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1400 "but dump_guest_core=off specified\n");
1401 }
1402 }
1403}
1404
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001405const char *qemu_ram_get_idstr(RAMBlock *rb)
1406{
1407 return rb->idstr;
1408}
1409
Mike Dayae3a7042013-09-05 14:41:35 -04001410/* Called with iothread lock held. */
Gongleifa53a0e2016-05-10 10:04:59 +08001411void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
Hu Tao20cfe882014-04-02 15:13:26 +08001412{
Gongleifa53a0e2016-05-10 10:04:59 +08001413 RAMBlock *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001414
Avi Kivityc5705a72011-12-20 15:59:12 +02001415 assert(new_block);
1416 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001417
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001418 if (dev) {
1419 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001420 if (id) {
1421 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001422 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001423 }
1424 }
1425 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1426
Gongleiab0a9952016-05-10 10:05:00 +08001427 rcu_read_lock();
Mike Day0dc3f442013-09-05 14:41:35 -04001428 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Gongleifa53a0e2016-05-10 10:04:59 +08001429 if (block != new_block &&
1430 !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001431 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1432 new_block->idstr);
1433 abort();
1434 }
1435 }
Mike Day0dc3f442013-09-05 14:41:35 -04001436 rcu_read_unlock();
Avi Kivityc5705a72011-12-20 15:59:12 +02001437}
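
/* Usage sketch (hypothetical device code): give a block a stable,
 * migration-visible name.  With a parent device the idstr becomes
 * "<dev-path>/vga.vram"; without one it is plain "vga.vram".  The
 * function name below is an assumption for illustration.
 */
static void hypothetical_vga_realize(DeviceState *dev, RAMBlock *vram)
{
    qemu_ram_set_idstr(vram, "vga.vram", dev);
}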
1438
Mike Dayae3a7042013-09-05 14:41:35 -04001439/* Called with iothread lock held. */
Gongleifa53a0e2016-05-10 10:04:59 +08001440void qemu_ram_unset_idstr(RAMBlock *block)
Hu Tao20cfe882014-04-02 15:13:26 +08001441{
Mike Dayae3a7042013-09-05 14:41:35 -04001442 /* FIXME: arch_init.c assumes that this is not called during
1443 * migration. Ignore the problem since hot-unplug during migration
1444 * does not work anyway.
1445 */
Hu Tao20cfe882014-04-02 15:13:26 +08001446 if (block) {
1447 memset(block->idstr, 0, sizeof(block->idstr));
1448 }
1449}
1450
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001451static int memory_try_enable_merging(void *addr, size_t len)
1452{
Marcel Apfelbaum75cc7f02015-02-04 17:43:55 +02001453 if (!machine_mem_merge(current_machine)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001454 /* disabled by the user */
1455 return 0;
1456 }
1457
1458 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1459}
1460
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001461/* Only legal before the guest might have detected the memory size: e.g. on
 1462 * incoming migration, or right after reset.
 1463 *
 1464 * As the memory core doesn't know how memory is accessed, it is up to the
 1465 * resize callback to update device state and/or add assertions to detect
1466 * misuse, if necessary.
1467 */
Gongleifa53a0e2016-05-10 10:04:59 +08001468int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001469{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001470 assert(block);
1471
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001472 newsize = HOST_PAGE_ALIGN(newsize);
Michael S. Tsirkin129ddaf2015-02-17 10:15:30 +01001473
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001474 if (block->used_length == newsize) {
1475 return 0;
1476 }
1477
1478 if (!(block->flags & RAM_RESIZEABLE)) {
1479 error_setg_errno(errp, EINVAL,
1480 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1481 " in != 0x" RAM_ADDR_FMT, block->idstr,
1482 newsize, block->used_length);
1483 return -EINVAL;
1484 }
1485
1486 if (block->max_length < newsize) {
1487 error_setg_errno(errp, EINVAL,
1488 "Length too large: %s: 0x" RAM_ADDR_FMT
1489 " > 0x" RAM_ADDR_FMT, block->idstr,
1490 newsize, block->max_length);
1491 return -EINVAL;
1492 }
1493
1494 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1495 block->used_length = newsize;
Paolo Bonzini58d27072015-03-23 11:56:01 +01001496 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1497 DIRTY_CLIENTS_ALL);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001498 memory_region_set_size(block->mr, newsize);
1499 if (block->resized) {
1500 block->resized(block->idstr, newsize, block->host);
1501 }
1502 return 0;
1503}
1504
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001505/* Called with ram_list.mutex held */
1506static void dirty_memory_extend(ram_addr_t old_ram_size,
1507 ram_addr_t new_ram_size)
1508{
1509 ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
1510 DIRTY_MEMORY_BLOCK_SIZE);
1511 ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
1512 DIRTY_MEMORY_BLOCK_SIZE);
1513 int i;
1514
1515 /* Only need to extend if block count increased */
1516 if (new_num_blocks <= old_num_blocks) {
1517 return;
1518 }
1519
1520 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1521 DirtyMemoryBlocks *old_blocks;
1522 DirtyMemoryBlocks *new_blocks;
1523 int j;
1524
1525 old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
1526 new_blocks = g_malloc(sizeof(*new_blocks) +
1527 sizeof(new_blocks->blocks[0]) * new_num_blocks);
1528
1529 if (old_num_blocks) {
1530 memcpy(new_blocks->blocks, old_blocks->blocks,
1531 old_num_blocks * sizeof(old_blocks->blocks[0]));
1532 }
1533
1534 for (j = old_num_blocks; j < new_num_blocks; j++) {
1535 new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
1536 }
1537
1538 atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
1539
1540 if (old_blocks) {
1541 g_free_rcu(old_blocks, rcu);
1542 }
1543 }
1544}
1545
Fam Zheng528f46a2016-03-01 14:18:18 +08001546static void ram_block_add(RAMBlock *new_block, Error **errp)
Avi Kivityc5705a72011-12-20 15:59:12 +02001547{
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001548 RAMBlock *block;
Mike Day0d53d9f2015-01-21 13:45:24 +01001549 RAMBlock *last_block = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001550 ram_addr_t old_ram_size, new_ram_size;
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001551 Error *err = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001552
1553 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001554
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001555 qemu_mutex_lock_ramlist();
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001556 new_block->offset = find_ram_offset(new_block->max_length);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001557
1558 if (!new_block->host) {
1559 if (xen_enabled()) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001560 xen_ram_alloc(new_block->offset, new_block->max_length,
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001561 new_block->mr, &err);
1562 if (err) {
1563 error_propagate(errp, err);
1564 qemu_mutex_unlock_ramlist();
Paolo Bonzini39c350e2016-03-09 18:14:01 +01001565 return;
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001566 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001567 } else {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001568 new_block->host = phys_mem_alloc(new_block->max_length,
Igor Mammedova2b257d2014-10-31 16:38:37 +00001569 &new_block->mr->align);
Markus Armbruster39228252013-07-31 15:11:11 +02001570 if (!new_block->host) {
Hu Taoef701d72014-09-09 13:27:54 +08001571 error_setg_errno(errp, errno,
1572 "cannot set up guest memory '%s'",
1573 memory_region_name(new_block->mr));
1574 qemu_mutex_unlock_ramlist();
Paolo Bonzini39c350e2016-03-09 18:14:01 +01001575 return;
Markus Armbruster39228252013-07-31 15:11:11 +02001576 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001577 memory_try_enable_merging(new_block->host, new_block->max_length);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001578 }
1579 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001580
Li Zhijiandd631692015-07-02 20:18:06 +08001581 new_ram_size = MAX(old_ram_size,
1582 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1583 if (new_ram_size > old_ram_size) {
1584 migration_bitmap_extend(old_ram_size, new_ram_size);
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001585 dirty_memory_extend(old_ram_size, new_ram_size);
Li Zhijiandd631692015-07-02 20:18:06 +08001586 }
Mike Day0d53d9f2015-01-21 13:45:24 +01001587 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1588 * QLIST (which has an RCU-friendly variant) does not have insertion at
1589 * tail, so save the last element in last_block.
1590 */
Mike Day0dc3f442013-09-05 14:41:35 -04001591 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Mike Day0d53d9f2015-01-21 13:45:24 +01001592 last_block = block;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001593 if (block->max_length < new_block->max_length) {
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001594 break;
1595 }
1596 }
1597 if (block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001598 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001599 } else if (last_block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001600 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001601 } else { /* list is empty */
Mike Day0dc3f442013-09-05 14:41:35 -04001602 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001603 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001604 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001605
Mike Day0dc3f442013-09-05 14:41:35 -04001606 /* Write list before version */
1607 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001608 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001609 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001610
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001611 cpu_physical_memory_set_dirty_range(new_block->offset,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001612 new_block->used_length,
1613 DIRTY_CLIENTS_ALL);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001614
Paolo Bonzinia904c912015-01-21 16:18:35 +01001615 if (new_block->host) {
1616 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1617 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1618 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1619 if (kvm_enabled()) {
1620 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1621 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001622 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001623}
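
/* Illustrative ordering (hypothetical configuration): after adding
 * blocks of 4 GiB, 256 MiB and 128 KiB the list reads 4G -> 256M ->
 * 128K, so lookups that walk it front to back probe the blocks
 * covering most guest addresses first.
 */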
1624
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001625#ifdef __linux__
Fam Zheng528f46a2016-03-01 14:18:18 +08001626RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
1627 bool share, const char *mem_path,
1628 Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001629{
1630 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001631 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001632
1633 if (xen_enabled()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001634 error_setg(errp, "-mem-path not supported with Xen");
Fam Zheng528f46a2016-03-01 14:18:18 +08001635 return NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001636 }
1637
1638 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1639 /*
1640 * file_ram_alloc() needs to allocate just like
1641 * phys_mem_alloc, but we haven't bothered to provide
1642 * a hook there.
1643 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001644 error_setg(errp,
1645 "-mem-path not supported with this accelerator");
Fam Zheng528f46a2016-03-01 14:18:18 +08001646 return NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001647 }
1648
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001649 size = HOST_PAGE_ALIGN(size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001650 new_block = g_malloc0(sizeof(*new_block));
1651 new_block->mr = mr;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001652 new_block->used_length = size;
1653 new_block->max_length = size;
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001654 new_block->flags = share ? RAM_SHARED : 0;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001655 new_block->host = file_ram_alloc(new_block, size,
1656 mem_path, errp);
1657 if (!new_block->host) {
1658 g_free(new_block);
Fam Zheng528f46a2016-03-01 14:18:18 +08001659 return NULL;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001660 }
1661
Fam Zheng528f46a2016-03-01 14:18:18 +08001662 ram_block_add(new_block, &local_err);
Hu Taoef701d72014-09-09 13:27:54 +08001663 if (local_err) {
1664 g_free(new_block);
1665 error_propagate(errp, local_err);
Fam Zheng528f46a2016-03-01 14:18:18 +08001666 return NULL;
Hu Taoef701d72014-09-09 13:27:54 +08001667 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001668 return new_block;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001669}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001670#endif
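
/* Usage sketch (hypothetical call site, Linux-only like the #ifdef
 * above): back guest RAM with a file, e.g. on a hugetlbfs mount.
 * "mr" is assumed to be initialised by the caller and the path is
 * only an example.
 */
static RAMBlock *hypothetical_hugepage_ram(ram_addr_t size,
                                           MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_from_file(size, mr, true /* share */,
                                    "/dev/hugepages", errp);
}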
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001671
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001672static
Fam Zheng528f46a2016-03-01 14:18:18 +08001673RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1674 void (*resized)(const char*,
1675 uint64_t length,
1676 void *host),
1677 void *host, bool resizeable,
1678 MemoryRegion *mr, Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001679{
1680 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001681 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001682
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001683 size = HOST_PAGE_ALIGN(size);
1684 max_size = HOST_PAGE_ALIGN(max_size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001685 new_block = g_malloc0(sizeof(*new_block));
1686 new_block->mr = mr;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001687 new_block->resized = resized;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001688 new_block->used_length = size;
1689 new_block->max_length = max_size;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001690 assert(max_size >= size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001691 new_block->fd = -1;
1692 new_block->host = host;
1693 if (host) {
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001694 new_block->flags |= RAM_PREALLOC;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001695 }
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001696 if (resizeable) {
1697 new_block->flags |= RAM_RESIZEABLE;
1698 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001699 ram_block_add(new_block, &local_err);
Hu Taoef701d72014-09-09 13:27:54 +08001700 if (local_err) {
1701 g_free(new_block);
1702 error_propagate(errp, local_err);
Fam Zheng528f46a2016-03-01 14:18:18 +08001703 return NULL;
Hu Taoef701d72014-09-09 13:27:54 +08001704 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001705 return new_block;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001706}
1707
Fam Zheng528f46a2016-03-01 14:18:18 +08001708RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001709 MemoryRegion *mr, Error **errp)
1710{
1711 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1712}
1713
Fam Zheng528f46a2016-03-01 14:18:18 +08001714RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
pbrook94a6b542009-04-11 17:15:54 +00001715{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001716 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1717}
1718
Fam Zheng528f46a2016-03-01 14:18:18 +08001719RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001720 void (*resized)(const char*,
1721 uint64_t length,
1722 void *host),
1723 MemoryRegion *mr, Error **errp)
1724{
1725 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
pbrook94a6b542009-04-11 17:15:54 +00001726}
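
/* Sketch (illustrative): a device wanting growable RAM pairs a
 * resizeable block with a callback; qemu_ram_resize() invokes it after
 * used_length has been updated.  All names and sizes below are
 * hypothetical.
 */
static void hypothetical_resized(const char *idstr, uint64_t new_length,
                                 void *host)
{
    /* update device-private size bookkeeping here */
}

static RAMBlock *hypothetical_alloc_growable(MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_resizeable(16 * 1024 * 1024,  /* initial */
                                     64 * 1024 * 1024,  /* maximum */
                                     hypothetical_resized, mr, errp);
}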
bellarde9a1ab12007-02-08 23:08:38 +00001727
Paolo Bonzini43771532013-09-09 17:58:40 +02001728static void reclaim_ramblock(RAMBlock *block)
1729{
1730 if (block->flags & RAM_PREALLOC) {
1731 ;
1732 } else if (xen_enabled()) {
1733 xen_invalidate_map_cache_entry(block->host);
1734#ifndef _WIN32
1735 } else if (block->fd >= 0) {
Eduardo Habkost2f3a2bb2015-11-06 20:11:21 -02001736 qemu_ram_munmap(block->host, block->max_length);
Paolo Bonzini43771532013-09-09 17:58:40 +02001737 close(block->fd);
1738#endif
1739 } else {
1740 qemu_anon_ram_free(block->host, block->max_length);
1741 }
1742 g_free(block);
1743}
1744
Fam Zhengf1060c52016-03-01 14:18:22 +08001745void qemu_ram_free(RAMBlock *block)
bellarde9a1ab12007-02-08 23:08:38 +00001746{
Marc-André Lureau85bc2a12016-03-29 13:20:51 +02001747 if (!block) {
1748 return;
1749 }
1750
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001751 qemu_mutex_lock_ramlist();
Fam Zhengf1060c52016-03-01 14:18:22 +08001752 QLIST_REMOVE_RCU(block, next);
1753 ram_list.mru_block = NULL;
1754 /* Write list before version */
1755 smp_wmb();
1756 ram_list.version++;
1757 call_rcu(block, reclaim_ramblock, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001758 qemu_mutex_unlock_ramlist();
bellarde9a1ab12007-02-08 23:08:38 +00001759}
1760
Huang Yingcd19cfa2011-03-02 08:56:19 +01001761#ifndef _WIN32
1762void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1763{
1764 RAMBlock *block;
1765 ram_addr_t offset;
1766 int flags;
1767 void *area, *vaddr;
1768
Mike Day0dc3f442013-09-05 14:41:35 -04001769 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001770 offset = addr - block->offset;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001771 if (offset < block->max_length) {
Michael S. Tsirkin1240be22014-11-12 11:44:41 +02001772 vaddr = ramblock_ptr(block, offset);
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001773 if (block->flags & RAM_PREALLOC) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001774 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001775 } else if (xen_enabled()) {
1776 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001777 } else {
1778 flags = MAP_FIXED;
Markus Armbruster3435f392013-07-31 15:11:07 +02001779 if (block->fd >= 0) {
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001780 flags |= (block->flags & RAM_SHARED ?
1781 MAP_SHARED : MAP_PRIVATE);
Markus Armbruster3435f392013-07-31 15:11:07 +02001782 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1783 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001784 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001785 /*
1786 * Remap needs to match alloc. Accelerators that
1787 * set phys_mem_alloc never remap. If they did,
1788 * we'd need a remap hook here.
1789 */
1790 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1791
Huang Yingcd19cfa2011-03-02 08:56:19 +01001792 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1793 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1794 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001795 }
1796 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001797 fprintf(stderr, "Could not remap addr: "
1798 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001799 length, addr);
1800 exit(1);
1801 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001802 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001803 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001804 }
Huang Yingcd19cfa2011-03-02 08:56:19 +01001805 }
1806 }
1807}
1808#endif /* !_WIN32 */
1809
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001810/* Return a host pointer to ram allocated with qemu_ram_alloc.
Mike Dayae3a7042013-09-05 14:41:35 -04001811 * This should not be used for general purpose DMA. Use address_space_map
1812 * or address_space_rw instead. For local memory (e.g. video ram) that the
1813 * device owns, use memory_region_get_ram_ptr.
Mike Day0dc3f442013-09-05 14:41:35 -04001814 *
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001815 * Called within RCU critical section.
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001816 */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001817void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001818{
Gonglei3655cb92016-02-20 10:35:20 +08001819 RAMBlock *block = ram_block;
1820
1821 if (block == NULL) {
1822 block = qemu_get_ram_block(addr);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001823 addr -= block->offset;
Gonglei3655cb92016-02-20 10:35:20 +08001824 }
Mike Dayae3a7042013-09-05 14:41:35 -04001825
1826 if (xen_enabled() && block->host == NULL) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001827 /* We need to check if the requested address is in RAM
1828 * because we don't want to map the entire memory in QEMU.
1829 * In that case just map until the end of the page.
1830 */
1831 if (block->offset == 0) {
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001832 return xen_map_cache(addr, 0, 0);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001833 }
Mike Dayae3a7042013-09-05 14:41:35 -04001834
1835 block->host = xen_map_cache(block->offset, block->max_length, 1);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001836 }
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001837 return ramblock_ptr(block, addr);
pbrookdc828ca2009-04-09 22:21:07 +00001838}
1839
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001840/* Return a host pointer to guest's ram. Similar to qemu_map_ram_ptr
Mike Dayae3a7042013-09-05 14:41:35 -04001841 * but takes a size argument.
Mike Day0dc3f442013-09-05 14:41:35 -04001842 *
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001843 * Called within RCU critical section.
Mike Dayae3a7042013-09-05 14:41:35 -04001844 */
Gonglei3655cb92016-02-20 10:35:20 +08001845static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
1846 hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001847{
Gonglei3655cb92016-02-20 10:35:20 +08001848 RAMBlock *block = ram_block;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001849 if (*size == 0) {
1850 return NULL;
1851 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001852
Gonglei3655cb92016-02-20 10:35:20 +08001853 if (block == NULL) {
1854 block = qemu_get_ram_block(addr);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001855 addr -= block->offset;
Gonglei3655cb92016-02-20 10:35:20 +08001856 }
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001857 *size = MIN(*size, block->max_length - addr);
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001858
1859 if (xen_enabled() && block->host == NULL) {
 1860 /* We need to check if the requested address is in RAM
1861 * because we don't want to map the entire memory in QEMU.
1862 * In that case just map the requested area.
1863 */
1864 if (block->offset == 0) {
1865 return xen_map_cache(addr, *size, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001866 }
1867
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001868 block->host = xen_map_cache(block->offset, block->max_length, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001869 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001870
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001871 return ramblock_ptr(block, addr);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001872}
1873
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001874/*
1875 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1876 * in that RAMBlock.
1877 *
1878 * ptr: Host pointer to look up
1879 * round_offset: If true round the result offset down to a page boundary
 1880 * *offset: set to the result offset within the RAMBlock
 1881 * (rounded down to a page boundary when round_offset is true)
1882 *
1883 * Returns: RAMBlock (or NULL if not found)
Mike Dayae3a7042013-09-05 14:41:35 -04001884 *
1885 * By the time this function returns, the returned pointer is not protected
1886 * by RCU anymore. If the caller is not within an RCU critical section and
1887 * does not hold the iothread lock, it must have other means of protecting the
1888 * pointer, such as a reference to the region that includes the incoming
1889 * ram_addr_t.
1890 */
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001891RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001892 ram_addr_t *offset)
pbrook5579c7f2009-04-11 14:47:08 +00001893{
pbrook94a6b542009-04-11 17:15:54 +00001894 RAMBlock *block;
1895 uint8_t *host = ptr;
1896
Jan Kiszka868bb332011-06-21 22:59:09 +02001897 if (xen_enabled()) {
Paolo Bonzinif615f392016-05-26 10:07:50 +02001898 ram_addr_t ram_addr;
Mike Day0dc3f442013-09-05 14:41:35 -04001899 rcu_read_lock();
Paolo Bonzinif615f392016-05-26 10:07:50 +02001900 ram_addr = xen_ram_addr_from_mapcache(ptr);
1901 block = qemu_get_ram_block(ram_addr);
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001902 if (block) {
Anthony PERARDd6b6aec2016-06-09 16:56:17 +01001903 *offset = ram_addr - block->offset;
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001904 }
Mike Day0dc3f442013-09-05 14:41:35 -04001905 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001906 return block;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001907 }
1908
Mike Day0dc3f442013-09-05 14:41:35 -04001909 rcu_read_lock();
1910 block = atomic_rcu_read(&ram_list.mru_block);
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001911 if (block && block->host && host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001912 goto found;
1913 }
1914
Mike Day0dc3f442013-09-05 14:41:35 -04001915 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001916 /* This case happens when the block is not mapped. */
1917 if (block->host == NULL) {
1918 continue;
1919 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001920 if (host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001921 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001922 }
pbrook94a6b542009-04-11 17:15:54 +00001923 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001924
Mike Day0dc3f442013-09-05 14:41:35 -04001925 rcu_read_unlock();
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001926 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001927
1928found:
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001929 *offset = (host - block->host);
1930 if (round_offset) {
1931 *offset &= TARGET_PAGE_MASK;
1932 }
Mike Day0dc3f442013-09-05 14:41:35 -04001933 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001934 return block;
1935}
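
/* Usage sketch (illustrative): translate a host pointer, e.g. one
 * obtained from qemu_map_ram_ptr(), back to its block and offset.
 */
static void hypothetical_inspect(void *host_ptr)
{
    ram_addr_t offset;
    RAMBlock *block = qemu_ram_block_from_host(host_ptr, false, &offset);

    if (block) {
        /* the guest ram_addr would be block->offset + offset */
    }
}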
1936
Dr. David Alan Gilberte3dd7492015-11-05 18:10:33 +00001937/*
1938 * Finds the named RAMBlock
1939 *
1940 * name: The name of RAMBlock to find
1941 *
1942 * Returns: RAMBlock (or NULL if not found)
1943 */
1944RAMBlock *qemu_ram_block_by_name(const char *name)
1945{
1946 RAMBlock *block;
1947
1948 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1949 if (!strcmp(name, block->idstr)) {
1950 return block;
1951 }
1952 }
1953
1954 return NULL;
1955}
1956
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001957/* Some of the softmmu routines need to translate from a host pointer
1958 (typically a TLB entry) back to a ram offset. */
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01001959ram_addr_t qemu_ram_addr_from_host(void *ptr)
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001960{
1961 RAMBlock *block;
Paolo Bonzinif615f392016-05-26 10:07:50 +02001962 ram_addr_t offset;
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001963
Paolo Bonzinif615f392016-05-26 10:07:50 +02001964 block = qemu_ram_block_from_host(ptr, false, &offset);
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001965 if (!block) {
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01001966 return RAM_ADDR_INVALID;
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001967 }
1968
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01001969 return block->offset + offset;
Marcelo Tosattie8902612010-10-11 15:31:19 -03001970}
Alex Williamsonf471a172010-06-11 11:11:42 -06001971
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001972/* Called within RCU critical section. */
Avi Kivitya8170e52012-10-23 12:30:10 +02001973static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001974 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001975{
Juan Quintela52159192013-10-08 12:44:04 +02001976 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001977 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00001978 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001979 switch (size) {
1980 case 1:
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001981 stb_p(qemu_map_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001982 break;
1983 case 2:
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001984 stw_p(qemu_map_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001985 break;
1986 case 4:
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001987 stl_p(qemu_map_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001988 break;
1989 default:
1990 abort();
1991 }
Paolo Bonzini58d27072015-03-23 11:56:01 +01001992 /* Set both VGA and migration bits for simplicity and to remove
1993 * the notdirty callback faster.
1994 */
1995 cpu_physical_memory_set_dirty_range(ram_addr, size,
1996 DIRTY_CLIENTS_NOCODE);
bellardf23db162005-08-21 19:12:28 +00001997 /* we remove the notdirty callback only if the code has been
1998 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02001999 if (!cpu_physical_memory_is_clean(ram_addr)) {
Peter Crosthwaitebcae01e2015-09-10 22:39:42 -07002000 tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
Andreas Färber4917cf42013-05-27 05:17:50 +02002001 }
bellard1ccde1c2004-02-06 19:46:14 +00002002}
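
/* Illustrative flow: the TLB steers the first write to a clean page
 * here.  The write invalidates any TBs for the page, lands in RAM via
 * qemu_map_ram_ptr(), and dirties the VGA/migration bitmaps;
 * tlb_set_dirty() then lets later writes go straight to RAM until the
 * page is made clean again.
 */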
2003
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002004static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
2005 unsigned size, bool is_write)
2006{
2007 return is_write;
2008}
2009
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002010static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002011 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002012 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002013 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00002014};
2015
pbrook0f459d12008-06-09 00:20:13 +00002016/* Generate a debug exception if a watchpoint has been hit. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002017static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
pbrook0f459d12008-06-09 00:20:13 +00002018{
Andreas Färber93afead2013-08-26 03:41:01 +02002019 CPUState *cpu = current_cpu;
Sergey Fedorov568496c2016-02-11 11:17:32 +00002020 CPUClass *cc = CPU_GET_CLASS(cpu);
Andreas Färber93afead2013-08-26 03:41:01 +02002021 CPUArchState *env = cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00002022 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00002023 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00002024 CPUWatchpoint *wp;
Emilio G. Cota89fee742016-04-07 13:19:22 -04002025 uint32_t cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00002026
Andreas Färberff4700b2013-08-26 18:23:18 +02002027 if (cpu->watchpoint_hit) {
aliguori06d55cc2008-11-18 20:24:06 +00002028 /* We re-entered the check after replacing the TB. Now raise
 2029 * the debug interrupt so that it will trigger after the
2030 * current instruction. */
Andreas Färber93afead2013-08-26 03:41:01 +02002031 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00002032 return;
2033 }
Andreas Färber93afead2013-08-26 03:41:01 +02002034 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Andreas Färberff4700b2013-08-26 18:23:18 +02002035 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Peter Maydell05068c02014-09-12 14:06:48 +01002036 if (cpu_watchpoint_address_matches(wp, vaddr, len)
2037 && (wp->flags & flags)) {
Peter Maydell08225672014-09-12 14:06:48 +01002038 if (flags == BP_MEM_READ) {
2039 wp->flags |= BP_WATCHPOINT_HIT_READ;
2040 } else {
2041 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
2042 }
2043 wp->hitaddr = vaddr;
Peter Maydell66b9b432015-04-26 16:49:24 +01002044 wp->hitattrs = attrs;
Andreas Färberff4700b2013-08-26 18:23:18 +02002045 if (!cpu->watchpoint_hit) {
Sergey Fedorov568496c2016-02-11 11:17:32 +00002046 if (wp->flags & BP_CPU &&
2047 !cc->debug_check_watchpoint(cpu, wp)) {
2048 wp->flags &= ~BP_WATCHPOINT_HIT;
2049 continue;
2050 }
Andreas Färberff4700b2013-08-26 18:23:18 +02002051 cpu->watchpoint_hit = wp;
Andreas Färber239c51a2013-09-01 17:12:23 +02002052 tb_check_watchpoint(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002053 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
Andreas Färber27103422013-08-26 08:31:06 +02002054 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +02002055 cpu_loop_exit(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002056 } else {
2057 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
Andreas Färber648f0342013-09-01 17:43:17 +02002058 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
Peter Maydell6886b982016-05-17 15:18:04 +01002059 cpu_loop_exit_noexc(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002060 }
aliguori06d55cc2008-11-18 20:24:06 +00002061 }
aliguori6e140f22008-11-18 20:37:55 +00002062 } else {
2063 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00002064 }
2065 }
2066}
2067
pbrook6658ffb2007-03-16 23:58:11 +00002068/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2069 so these check for a hit then pass through to the normal out-of-line
2070 phys routines. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002071static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
2072 unsigned size, MemTxAttrs attrs)
pbrook6658ffb2007-03-16 23:58:11 +00002073{
Peter Maydell66b9b432015-04-26 16:49:24 +01002074 MemTxResult res;
2075 uint64_t data;
Peter Maydell79ed0412016-01-21 14:15:06 +00002076 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2077 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
pbrook6658ffb2007-03-16 23:58:11 +00002078
Peter Maydell66b9b432015-04-26 16:49:24 +01002079 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
Avi Kivity1ec9b902012-01-02 12:47:48 +02002080 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04002081 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002082 data = address_space_ldub(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002083 break;
2084 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002085 data = address_space_lduw(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002086 break;
2087 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002088 data = address_space_ldl(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002089 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02002090 default: abort();
2091 }
Peter Maydell66b9b432015-04-26 16:49:24 +01002092 *pdata = data;
2093 return res;
2094}
2095
2096static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
2097 uint64_t val, unsigned size,
2098 MemTxAttrs attrs)
2099{
2100 MemTxResult res;
Peter Maydell79ed0412016-01-21 14:15:06 +00002101 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2102 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
Peter Maydell66b9b432015-04-26 16:49:24 +01002103
2104 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
2105 switch (size) {
2106 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002107 address_space_stb(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002108 break;
2109 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002110 address_space_stw(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002111 break;
2112 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002113 address_space_stl(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002114 break;
2115 default: abort();
2116 }
2117 return res;
pbrook6658ffb2007-03-16 23:58:11 +00002118}
2119
Avi Kivity1ec9b902012-01-02 12:47:48 +02002120static const MemoryRegionOps watch_mem_ops = {
Peter Maydell66b9b432015-04-26 16:49:24 +01002121 .read_with_attrs = watch_mem_read,
2122 .write_with_attrs = watch_mem_write,
Avi Kivity1ec9b902012-01-02 12:47:48 +02002123 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00002124};
pbrook6658ffb2007-03-16 23:58:11 +00002125
Peter Maydellf25a49e2015-04-26 16:49:24 +01002126static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2127 unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002128{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002129 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002130 uint8_t buf[8];
Peter Maydell5c9eb022015-04-26 16:49:24 +01002131 MemTxResult res;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002132
blueswir1db7b5422007-05-26 17:36:03 +00002133#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002134 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002135 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00002136#endif
Peter Maydell5c9eb022015-04-26 16:49:24 +01002137 res = address_space_read(subpage->as, addr + subpage->base,
2138 attrs, buf, len);
2139 if (res) {
2140 return res;
Peter Maydellf25a49e2015-04-26 16:49:24 +01002141 }
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002142 switch (len) {
2143 case 1:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002144 *data = ldub_p(buf);
2145 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002146 case 2:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002147 *data = lduw_p(buf);
2148 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002149 case 4:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002150 *data = ldl_p(buf);
2151 return MEMTX_OK;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002152 case 8:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002153 *data = ldq_p(buf);
2154 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002155 default:
2156 abort();
2157 }
blueswir1db7b5422007-05-26 17:36:03 +00002158}
2159
Peter Maydellf25a49e2015-04-26 16:49:24 +01002160static MemTxResult subpage_write(void *opaque, hwaddr addr,
2161 uint64_t value, unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002162{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002163 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002164 uint8_t buf[8];
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002165
blueswir1db7b5422007-05-26 17:36:03 +00002166#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002167 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002168 " value %"PRIx64"\n",
2169 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00002170#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002171 switch (len) {
2172 case 1:
2173 stb_p(buf, value);
2174 break;
2175 case 2:
2176 stw_p(buf, value);
2177 break;
2178 case 4:
2179 stl_p(buf, value);
2180 break;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002181 case 8:
2182 stq_p(buf, value);
2183 break;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002184 default:
2185 abort();
2186 }
Peter Maydell5c9eb022015-04-26 16:49:24 +01002187 return address_space_write(subpage->as, addr + subpage->base,
2188 attrs, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00002189}
2190
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002191static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08002192 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002193{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002194 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002195#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002196 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002197 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002198#endif
2199
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002200 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08002201 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002202}
2203
Avi Kivity70c68e42012-01-02 12:32:48 +02002204static const MemoryRegionOps subpage_ops = {
Peter Maydellf25a49e2015-04-26 16:49:24 +01002205 .read_with_attrs = subpage_read,
2206 .write_with_attrs = subpage_write,
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002207 .impl.min_access_size = 1,
2208 .impl.max_access_size = 8,
2209 .valid.min_access_size = 1,
2210 .valid.max_access_size = 8,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002211 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02002212 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00002213};
2214
Anthony Liguoric227f092009-10-01 16:12:16 -05002215static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002216 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00002217{
2218 int idx, eidx;
2219
2220 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2221 return -1;
2222 idx = SUBPAGE_IDX(start);
2223 eidx = SUBPAGE_IDX(end);
2224#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002225 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2226 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00002227#endif
blueswir1db7b5422007-05-26 17:36:03 +00002228 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02002229 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00002230 }
2231
2232 return 0;
2233}
2234
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002235static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00002236{
Anthony Liguoric227f092009-10-01 16:12:16 -05002237 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00002238
Anthony Liguori7267c092011-08-20 22:09:37 -05002239 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00002240
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002241 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00002242 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002243 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002244 NULL, TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02002245 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00002246#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002247 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2248 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00002249#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02002250 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00002251
2252 return mmio;
2253}
2254
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002255static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2256 MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02002257{
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002258 assert(as);
Avi Kivity5312bd82012-02-12 18:32:55 +02002259 MemoryRegionSection section = {
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002260 .address_space = as,
Avi Kivity5312bd82012-02-12 18:32:55 +02002261 .mr = mr,
2262 .offset_within_address_space = 0,
2263 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002264 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02002265 };
2266
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002267 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02002268}
2269
Peter Maydella54c87b2016-01-21 14:15:05 +00002270MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
Avi Kivityaa102232012-03-08 17:06:55 +02002271{
Peter Maydella54c87b2016-01-21 14:15:05 +00002272 int asidx = cpu_asidx_from_attrs(cpu, attrs);
2273 CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
Peter Maydell32857f42015-10-01 15:29:50 +01002274 AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002275 MemoryRegionSection *sections = d->map.sections;
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002276
2277 return sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02002278}
2279
Avi Kivitye9179ce2009-06-14 11:38:52 +03002280static void io_mem_init(void)
2281{
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002282 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002283 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002284 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002285 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002286 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002287 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002288 NULL, UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03002289}
2290
Avi Kivityac1970f2012-10-03 16:22:53 +02002291static void mem_begin(MemoryListener *listener)
2292{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002293 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002294 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2295 uint16_t n;
2296
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002297 n = dummy_section(&d->map, as, &io_mem_unassigned);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002298 assert(n == PHYS_SECTION_UNASSIGNED);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002299 n = dummy_section(&d->map, as, &io_mem_notdirty);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002300 assert(n == PHYS_SECTION_NOTDIRTY);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002301 n = dummy_section(&d->map, as, &io_mem_rom);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002302 assert(n == PHYS_SECTION_ROM);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002303 n = dummy_section(&d->map, as, &io_mem_watch);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002304 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02002305
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02002306 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02002307 d->as = as;
2308 as->next_dispatch = d;
2309}
2310
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002311static void address_space_dispatch_free(AddressSpaceDispatch *d)
2312{
2313 phys_sections_free(&d->map);
2314 g_free(d);
2315}
2316
Paolo Bonzini00752702013-05-29 12:13:54 +02002317static void mem_commit(MemoryListener *listener)
2318{
2319 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02002320 AddressSpaceDispatch *cur = as->dispatch;
2321 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02002322
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002323 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02002324
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002325 atomic_rcu_set(&as->dispatch, next);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002326 if (cur) {
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002327 call_rcu(cur, address_space_dispatch_free, rcu);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002328 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02002329}
2330
Avi Kivity1d711482012-10-02 18:54:45 +02002331static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02002332{
Peter Maydell32857f42015-10-01 15:29:50 +01002333 CPUAddressSpace *cpuas;
2334 AddressSpaceDispatch *d;
Avi Kivity117712c2012-02-12 21:23:17 +02002335
2336 /* since each CPU stores ram addresses in its TLB cache, we must
2337 reset the modified entries */
Peter Maydell32857f42015-10-01 15:29:50 +01002338 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
2339 cpu_reloading_memory_map();
2340 /* The CPU and TLB are protected by the iothread lock.
2341 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2342 * may have split the RCU critical section.
2343 */
2344 d = atomic_rcu_read(&cpuas->as->dispatch);
2345 cpuas->memory_dispatch = d;
2346 tlb_flush(cpuas->cpu, 1);
Avi Kivity50c1e142012-02-08 21:36:02 +02002347}
2348
Avi Kivityac1970f2012-10-03 16:22:53 +02002349void address_space_init_dispatch(AddressSpace *as)
2350{
Paolo Bonzini00752702013-05-29 12:13:54 +02002351 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002352 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002353 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02002354 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02002355 .region_add = mem_add,
2356 .region_nop = mem_add,
2357 .priority = 0,
2358 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002359 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02002360}
2361
Paolo Bonzini6e48e8f2015-02-10 10:25:44 -07002362void address_space_unregister(AddressSpace *as)
2363{
2364 memory_listener_unregister(&as->dispatch_listener);
2365}
2366
Avi Kivity83f3c252012-10-07 12:59:55 +02002367void address_space_destroy_dispatch(AddressSpace *as)
2368{
2369 AddressSpaceDispatch *d = as->dispatch;
2370
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002371 atomic_rcu_set(&as->dispatch, NULL);
2372 if (d) {
2373 call_rcu(d, address_space_dispatch_free, rcu);
2374 }
Avi Kivity83f3c252012-10-07 12:59:55 +02002375}
2376
Avi Kivity62152b82011-07-26 14:26:14 +03002377static void memory_map_init(void)
2378{
Anthony Liguori7267c092011-08-20 22:09:37 -05002379 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01002380
Paolo Bonzini57271d62013-11-07 17:14:37 +01002381 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002382 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03002383
Anthony Liguori7267c092011-08-20 22:09:37 -05002384 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02002385 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2386 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002387 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity62152b82011-07-26 14:26:14 +03002388}
2389
2390MemoryRegion *get_system_memory(void)
2391{
2392 return system_memory;
2393}
2394
Avi Kivity309cb472011-08-08 16:09:03 +03002395MemoryRegion *get_system_io(void)
2396{
2397 return system_io;
2398}
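/*
 * Illustrative sketch, not part of the original file: how board code
 * typically plugs RAM into the tree returned by get_system_memory().
 * The "example.ram" name and the 0x40000000 base address are invented
 * for the example; real boards define their own layout.
 */
static void example_map_board_ram(uint64_t ram_size, Error **errp)
{
    MemoryRegion *ram = g_new(MemoryRegion, 1);

    /* Allocate host memory to back the guest-visible RAM region. */
    memory_region_init_ram(ram, NULL, "example.ram", ram_size, errp);
    /* Make it visible to the guest at a fixed physical address. */
    memory_region_add_subregion(get_system_memory(), 0x40000000, ram);
}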
2399
pbrooke2eef172008-06-08 01:09:01 +00002400#endif /* !defined(CONFIG_USER_ONLY) */
2401
bellard13eb76e2004-01-24 15:23:36 +00002402/* physical memory access (slow version, mainly for debug) */
2403#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02002404int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00002405 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002406{
2407 int l, flags;
2408 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00002409 void * p;
bellard13eb76e2004-01-24 15:23:36 +00002410
2411 while (len > 0) {
2412 page = addr & TARGET_PAGE_MASK;
2413 l = (page + TARGET_PAGE_SIZE) - addr;
2414 if (l > len)
2415 l = len;
2416 flags = page_get_flags(page);
2417 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00002418 return -1;
bellard13eb76e2004-01-24 15:23:36 +00002419 if (is_write) {
2420 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00002421 return -1;
bellard579a97f2007-11-11 14:26:47 +00002422 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002423 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00002424 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002425 memcpy(p, buf, l);
2426 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00002427 } else {
2428 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00002429 return -1;
bellard579a97f2007-11-11 14:26:47 +00002430 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002431 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00002432 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002433 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00002434 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00002435 }
2436 len -= l;
2437 buf += l;
2438 addr += l;
2439 }
Paul Brooka68fe892010-03-01 00:08:59 +00002440 return 0;
bellard13eb76e2004-01-24 15:23:36 +00002441}
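/*
 * Illustrative sketch, not part of the original file: a gdbstub-style
 * caller reading guest memory through the accessor above. The helper
 * name is invented; a negative return just means some page in the range
 * was unmapped or lacked PAGE_READ permission.
 */
static int example_peek_guest_byte(CPUState *cpu, target_ulong addr)
{
    uint8_t buf[1];

    if (cpu_memory_rw_debug(cpu, addr, buf, sizeof(buf), 0) < 0) {
        return -1; /* address not accessible */
    }
    return buf[0];
}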
bellard8df1cd02005-01-28 22:37:22 +00002442
bellard13eb76e2004-01-24 15:23:36 +00002443#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002444
Paolo Bonzini845b6212015-03-23 11:45:53 +01002445static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002446 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002447{
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002448 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002449 addr += memory_region_get_ram_addr(mr);
2450
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002451 /* No early return if dirty_log_mask is or becomes 0, because
2452 * cpu_physical_memory_set_dirty_range will still call
2453 * xen_modified_memory.
2454 */
2455 if (dirty_log_mask) {
2456 dirty_log_mask =
2457 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002458 }
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002459 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2460 tb_invalidate_phys_range(addr, addr + length);
2461 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2462 }
2463 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002464}
2465
Richard Henderson23326162013-07-08 14:55:59 -07002466static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02002467{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02002468 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07002469
2470 /* Regions are assumed to support 1-4 byte accesses unless
2471 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07002472 if (access_size_max == 0) {
2473 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002474 }
Richard Henderson23326162013-07-08 14:55:59 -07002475
2476 /* Bound the maximum access by the alignment of the address. */
2477 if (!mr->ops->impl.unaligned) {
2478 unsigned align_size_max = addr & -addr;
2479 if (align_size_max != 0 && align_size_max < access_size_max) {
2480 access_size_max = align_size_max;
2481 }
2482 }
2483
2484 /* Don't attempt accesses larger than the maximum. */
2485 if (l > access_size_max) {
2486 l = access_size_max;
2487 }
Peter Maydell6554f5c2015-07-24 13:33:10 +01002488 l = pow2floor(l);
Richard Henderson23326162013-07-08 14:55:59 -07002489
2490 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002491}
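/*
 * Worked example, not part of the original file: how the clamping above
 * behaves for a hypothetical region whose ops leave valid.max_access_size
 * at 0 (so the default of 4 applies) and impl.unaligned clear.
 */
static void example_access_size(MemoryRegion *mr)
{
    /* Aligned address: capped only by the default maximum of 4 bytes. */
    assert(memory_access_size(mr, 8, 0x1000) == 4);
    /* 0x1002 & -0x1002 == 2, so alignment limits the access to 2 bytes. */
    assert(memory_access_size(mr, 8, 0x1002) == 2);
    /* Non-power-of-two lengths are rounded down by pow2floor(). */
    assert(memory_access_size(mr, 3, 0x1000) == 2);
}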
2492
Jan Kiszka4840f102015-06-18 18:47:22 +02002493static bool prepare_mmio_access(MemoryRegion *mr)
Paolo Bonzini125b3802015-06-18 18:47:21 +02002494{
Jan Kiszka4840f102015-06-18 18:47:22 +02002495 bool unlocked = !qemu_mutex_iothread_locked();
2496 bool release_lock = false;
2497
2498 if (unlocked && mr->global_locking) {
2499 qemu_mutex_lock_iothread();
2500 unlocked = false;
2501 release_lock = true;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002502 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002503 if (mr->flush_coalesced_mmio) {
2504 if (unlocked) {
2505 qemu_mutex_lock_iothread();
2506 }
2507 qemu_flush_coalesced_mmio_buffer();
2508 if (unlocked) {
2509 qemu_mutex_unlock_iothread();
2510 }
2511 }
2512
2513 return release_lock;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002514}
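/*
 * Illustrative sketch, not part of the original file: the caller contract
 * for prepare_mmio_access(), mirroring the pattern used throughout this
 * file. The function name is invented; error handling is elided.
 */
static uint32_t example_locked_mmio_read(MemoryRegion *mr, hwaddr addr,
                                         MemTxAttrs attrs)
{
    uint64_t val = 0;
    bool release_lock = prepare_mmio_access(mr);

    memory_region_dispatch_read(mr, addr, &val, 4, attrs);
    /* Drop the iothread lock only if this helper had to take it. */
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    return val;
}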
2515
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002516/* Called within RCU critical section. */
2517static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
2518 MemTxAttrs attrs,
2519 const uint8_t *buf,
2520 int len, hwaddr addr1,
2521 hwaddr l, MemoryRegion *mr)
bellard13eb76e2004-01-24 15:23:36 +00002522{
bellard13eb76e2004-01-24 15:23:36 +00002523 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002524 uint64_t val;
Peter Maydell3b643492015-04-26 16:49:23 +01002525 MemTxResult result = MEMTX_OK;
Jan Kiszka4840f102015-06-18 18:47:22 +02002526 bool release_lock = false;
ths3b46e622007-09-17 08:09:54 +00002527
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002528 for (;;) {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002529 if (!memory_access_is_direct(mr, true)) {
2530 release_lock |= prepare_mmio_access(mr);
2531 l = memory_access_size(mr, l, addr1);
2532 /* XXX: could force current_cpu to NULL to avoid
2533 potential bugs */
2534 switch (l) {
2535 case 8:
2536 /* 64 bit write access */
2537 val = ldq_p(buf);
2538 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2539 attrs);
2540 break;
2541 case 4:
2542 /* 32 bit write access */
2543 val = ldl_p(buf);
2544 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2545 attrs);
2546 break;
2547 case 2:
2548 /* 16 bit write access */
2549 val = lduw_p(buf);
2550 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2551 attrs);
2552 break;
2553 case 1:
2554 /* 8 bit write access */
2555 val = ldub_p(buf);
2556 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2557 attrs);
2558 break;
2559 default:
2560 abort();
bellard13eb76e2004-01-24 15:23:36 +00002561 }
2562 } else {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002563 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002564 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002565 memcpy(ptr, buf, l);
2566 invalidate_and_set_dirty(mr, addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002567 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002568
2569 if (release_lock) {
2570 qemu_mutex_unlock_iothread();
2571 release_lock = false;
2572 }
2573
bellard13eb76e2004-01-24 15:23:36 +00002574 len -= l;
2575 buf += l;
2576 addr += l;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002577
2578 if (!len) {
2579 break;
2580 }
2581
2582 l = len;
2583 mr = address_space_translate(as, addr, &addr1, &l, true);
bellard13eb76e2004-01-24 15:23:36 +00002584 }
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002585
Peter Maydell3b643492015-04-26 16:49:23 +01002586 return result;
bellard13eb76e2004-01-24 15:23:36 +00002587}
bellard8df1cd02005-01-28 22:37:22 +00002588
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002589MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2590 const uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002591{
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002592 hwaddr l;
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002593 hwaddr addr1;
2594 MemoryRegion *mr;
2595 MemTxResult result = MEMTX_OK;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002596
2597 if (len > 0) {
2598 rcu_read_lock();
2599 l = len;
2600 mr = address_space_translate(as, addr, &addr1, &l, true);
2601 result = address_space_write_continue(as, addr, attrs, buf, len,
2602 addr1, l, mr);
2603 rcu_read_unlock();
2604 }
2605
2606 return result;
2607}
2608
2609/* Called within RCU critical section. */
2610MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
2611 MemTxAttrs attrs, uint8_t *buf,
2612 int len, hwaddr addr1, hwaddr l,
2613 MemoryRegion *mr)
2614{
2615 uint8_t *ptr;
2616 uint64_t val;
2617 MemTxResult result = MEMTX_OK;
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002618 bool release_lock = false;
2619
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002620 for (;;) {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002621 if (!memory_access_is_direct(mr, false)) {
2622 /* I/O case */
2623 release_lock |= prepare_mmio_access(mr);
2624 l = memory_access_size(mr, l, addr1);
2625 switch (l) {
2626 case 8:
2627 /* 64 bit read access */
2628 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2629 attrs);
2630 stq_p(buf, val);
2631 break;
2632 case 4:
2633 /* 32 bit read access */
2634 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2635 attrs);
2636 stl_p(buf, val);
2637 break;
2638 case 2:
2639 /* 16 bit read access */
2640 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2641 attrs);
2642 stw_p(buf, val);
2643 break;
2644 case 1:
2645 /* 8 bit read access */
2646 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2647 attrs);
2648 stb_p(buf, val);
2649 break;
2650 default:
2651 abort();
2652 }
2653 } else {
2654 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002655 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002656 memcpy(buf, ptr, l);
2657 }
2658
2659 if (release_lock) {
2660 qemu_mutex_unlock_iothread();
2661 release_lock = false;
2662 }
2663
2664 len -= l;
2665 buf += l;
2666 addr += l;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002667
2668 if (!len) {
2669 break;
2670 }
2671
2672 l = len;
2673 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002674 }
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002675
2676 return result;
2677}
2678
Paolo Bonzini3cc8f882015-12-09 10:34:13 +01002679MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
2680 MemTxAttrs attrs, uint8_t *buf, int len)
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002681{
2682 hwaddr l;
2683 hwaddr addr1;
2684 MemoryRegion *mr;
2685 MemTxResult result = MEMTX_OK;
2686
2687 if (len > 0) {
2688 rcu_read_lock();
2689 l = len;
2690 mr = address_space_translate(as, addr, &addr1, &l, false);
2691 result = address_space_read_continue(as, addr, attrs, buf, len,
2692 addr1, l, mr);
2693 rcu_read_unlock();
2694 }
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002695
2696 return result;
Avi Kivityac1970f2012-10-03 16:22:53 +02002697}
2698
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002699MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2700 uint8_t *buf, int len, bool is_write)
2701{
2702 if (is_write) {
2703 return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
2704 } else {
2705 return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
2706 }
2707}
Avi Kivityac1970f2012-10-03 16:22:53 +02002708
Avi Kivitya8170e52012-10-23 12:30:10 +02002709void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002710 int len, int is_write)
2711{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002712 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2713 buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002714}
2715
Alexander Graf582b55a2013-12-11 14:17:44 +01002716enum write_rom_type {
2717 WRITE_DATA,
2718 FLUSH_CACHE,
2719};
2720
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002721static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
Alexander Graf582b55a2013-12-11 14:17:44 +01002722 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
bellardd0ecd2a2006-04-23 17:14:48 +00002723{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002724 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002725 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002726 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002727 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002728
Paolo Bonzini41063e12015-03-18 14:21:43 +01002729 rcu_read_lock();
bellardd0ecd2a2006-04-23 17:14:48 +00002730 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002731 l = len;
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002732 mr = address_space_translate(as, addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002733
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002734 if (!(memory_region_is_ram(mr) ||
2735 memory_region_is_romd(mr))) {
Paolo Bonzinib242e0e2015-07-04 00:24:51 +02002736 l = memory_access_size(mr, l, addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002737 } else {
bellardd0ecd2a2006-04-23 17:14:48 +00002738 /* ROM/RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002739 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf582b55a2013-12-11 14:17:44 +01002740 switch (type) {
2741 case WRITE_DATA:
2742 memcpy(ptr, buf, l);
Paolo Bonzini845b6212015-03-23 11:45:53 +01002743 invalidate_and_set_dirty(mr, addr1, l);
Alexander Graf582b55a2013-12-11 14:17:44 +01002744 break;
2745 case FLUSH_CACHE:
2746 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2747 break;
2748 }
bellardd0ecd2a2006-04-23 17:14:48 +00002749 }
2750 len -= l;
2751 buf += l;
2752 addr += l;
2753 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002754 rcu_read_unlock();
bellardd0ecd2a2006-04-23 17:14:48 +00002755}
2756
Alexander Graf582b55a2013-12-11 14:17:44 +01002757/* used for ROM loading : can write in RAM and ROM */
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002758void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
Alexander Graf582b55a2013-12-11 14:17:44 +01002759 const uint8_t *buf, int len)
2760{
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002761 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
Alexander Graf582b55a2013-12-11 14:17:44 +01002762}
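/*
 * Usage sketch, not part of the original file: a ROM loader patching a
 * blob into memory that the guest sees as read-only. The address and
 * payload bytes are invented for the example.
 */
static void example_load_rom_blob(void)
{
    static const uint8_t blob[] = { 0x12, 0x34, 0x56, 0x78 };

    cpu_physical_memory_write_rom(&address_space_memory, 0x0,
                                  blob, sizeof(blob));
}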
2763
2764void cpu_flush_icache_range(hwaddr start, int len)
2765{
2766 /*
2767 * This function should do the same thing as an icache flush that was
2768 * triggered from within the guest. For TCG we are always cache coherent,
2769 * so there is no need to flush anything. For KVM / Xen we need to flush
2770 * the host's instruction cache at least.
2771 */
2772 if (tcg_enabled()) {
2773 return;
2774 }
2775
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002776 cpu_physical_memory_write_rom_internal(&address_space_memory,
2777 start, NULL, len, FLUSH_CACHE);
Alexander Graf582b55a2013-12-11 14:17:44 +01002778}
2779
aliguori6d16c2f2009-01-22 16:59:11 +00002780typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002781 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002782 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002783 hwaddr addr;
2784 hwaddr len;
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002785 bool in_use;
aliguori6d16c2f2009-01-22 16:59:11 +00002786} BounceBuffer;
2787
2788static BounceBuffer bounce;
2789
aliguoriba223c22009-01-22 16:59:16 +00002790typedef struct MapClient {
Fam Zhenge95205e2015-03-16 17:03:37 +08002791 QEMUBH *bh;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002792 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002793} MapClient;
2794
Fam Zheng38e047b2015-03-16 17:03:35 +08002795QemuMutex map_client_list_lock;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002796static QLIST_HEAD(map_client_list, MapClient) map_client_list
2797 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002798
Fam Zhenge95205e2015-03-16 17:03:37 +08002799static void cpu_unregister_map_client_do(MapClient *client)
aliguoriba223c22009-01-22 16:59:16 +00002800{
Blue Swirl72cf2d42009-09-12 07:36:22 +00002801 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002802 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002803}
2804
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002805static void cpu_notify_map_clients_locked(void)
aliguoriba223c22009-01-22 16:59:16 +00002806{
2807 MapClient *client;
2808
Blue Swirl72cf2d42009-09-12 07:36:22 +00002809 while (!QLIST_EMPTY(&map_client_list)) {
2810 client = QLIST_FIRST(&map_client_list);
Fam Zhenge95205e2015-03-16 17:03:37 +08002811 qemu_bh_schedule(client->bh);
2812 cpu_unregister_map_client_do(client);
aliguoriba223c22009-01-22 16:59:16 +00002813 }
2814}
2815
Fam Zhenge95205e2015-03-16 17:03:37 +08002816void cpu_register_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002817{
2818 MapClient *client = g_malloc(sizeof(*client));
2819
Fam Zheng38e047b2015-03-16 17:03:35 +08002820 qemu_mutex_lock(&map_client_list_lock);
Fam Zhenge95205e2015-03-16 17:03:37 +08002821 client->bh = bh;
bellardd0ecd2a2006-04-23 17:14:48 +00002822 QLIST_INSERT_HEAD(&map_client_list, client, link);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002823 if (!atomic_read(&bounce.in_use)) {
2824 cpu_notify_map_clients_locked();
2825 }
Fam Zheng38e047b2015-03-16 17:03:35 +08002826 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002827}
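/*
 * Usage sketch, not part of the original file: a device model whose
 * address_space_map() call failed because the bounce buffer was busy can
 * park a bottom half here and retry from its callback. retry_cb and
 * opaque are hypothetical; the caller keeps ownership of the BH.
 */
static void example_schedule_map_retry(QEMUBHFunc *retry_cb, void *opaque)
{
    QEMUBH *bh = qemu_bh_new(retry_cb, opaque);

    /* The BH is scheduled once a mapping is likely to succeed again. */
    cpu_register_map_client(bh);
}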
2828
Fam Zheng38e047b2015-03-16 17:03:35 +08002829void cpu_exec_init_all(void)
2830{
2831 qemu_mutex_init(&ram_list.mutex);
Fam Zheng38e047b2015-03-16 17:03:35 +08002832 io_mem_init();
Paolo Bonzini680a4782015-11-02 09:23:52 +01002833 memory_map_init();
Fam Zheng38e047b2015-03-16 17:03:35 +08002834 qemu_mutex_init(&map_client_list_lock);
2835}
2836
Fam Zhenge95205e2015-03-16 17:03:37 +08002837void cpu_unregister_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002838{
Fam Zhenge95205e2015-03-16 17:03:37 +08002839 MapClient *client;
bellardd0ecd2a2006-04-23 17:14:48 +00002840
Fam Zhenge95205e2015-03-16 17:03:37 +08002841 qemu_mutex_lock(&map_client_list_lock);
2842 QLIST_FOREACH(client, &map_client_list, link) {
2843 if (client->bh == bh) {
2844 cpu_unregister_map_client_do(client);
2845 break;
2846 }
2847 }
2848 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002849}
2850
2851static void cpu_notify_map_clients(void)
2852{
Fam Zheng38e047b2015-03-16 17:03:35 +08002853 qemu_mutex_lock(&map_client_list_lock);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002854 cpu_notify_map_clients_locked();
Fam Zheng38e047b2015-03-16 17:03:35 +08002855 qemu_mutex_unlock(&map_client_list_lock);
aliguori6d16c2f2009-01-22 16:59:11 +00002856}
2857
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002858bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2859{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002860 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002861 hwaddr l, xlat;
2862
Paolo Bonzini41063e12015-03-18 14:21:43 +01002863 rcu_read_lock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002864 while (len > 0) {
2865 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002866 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2867 if (!memory_access_is_direct(mr, is_write)) {
2868 l = memory_access_size(mr, l, addr);
2869 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
 rcu_read_unlock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002870 return false;
2871 }
2872 }
2873
2874 len -= l;
2875 addr += l;
2876 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002877 rcu_read_unlock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002878 return true;
2879}
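/*
 * Usage sketch, not part of the original file: probing a DMA window
 * before committing to a transfer, as a device model might do. The
 * helper name is invented.
 */
static bool example_dma_window_ok(AddressSpace *as, hwaddr base, int size)
{
    /* is_write == true covers the device-to-memory direction. */
    return address_space_access_valid(as, base, size, true);
}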
2880
aliguori6d16c2f2009-01-22 16:59:11 +00002881/* Map a physical memory region into a host virtual address.
2882 * May map a subset of the requested range: on entry *plen holds the
 * requested length, and on return it holds the length actually mapped.
2883 * May return NULL if resources needed to perform the mapping are exhausted.
2884 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002885 * Use cpu_register_map_client() to know when retrying the map operation is
2886 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002887 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002888void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002889 hwaddr addr,
2890 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002891 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002892{
Avi Kivitya8170e52012-10-23 12:30:10 +02002893 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002894 hwaddr done = 0;
2895 hwaddr l, xlat, base;
2896 MemoryRegion *mr, *this_mr;
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01002897 void *ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00002898
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002899 if (len == 0) {
2900 return NULL;
2901 }
aliguori6d16c2f2009-01-22 16:59:11 +00002902
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002903 l = len;
Paolo Bonzini41063e12015-03-18 14:21:43 +01002904 rcu_read_lock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002905 mr = address_space_translate(as, addr, &xlat, &l, is_write);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002906
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002907 if (!memory_access_is_direct(mr, is_write)) {
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002908 if (atomic_xchg(&bounce.in_use, true)) {
Paolo Bonzini41063e12015-03-18 14:21:43 +01002909 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002910 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002911 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002912 /* Avoid unbounded allocations */
2913 l = MIN(l, TARGET_PAGE_SIZE);
2914 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002915 bounce.addr = addr;
2916 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002917
2918 memory_region_ref(mr);
2919 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002920 if (!is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002921 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2922 bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002923 }
aliguori6d16c2f2009-01-22 16:59:11 +00002924
Paolo Bonzini41063e12015-03-18 14:21:43 +01002925 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002926 *plen = l;
2927 return bounce.buffer;
2928 }
2929
2930 base = xlat;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002931
2932 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00002933 len -= l;
2934 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002935 done += l;
2936 if (len == 0) {
2937 break;
2938 }
2939
2940 l = len;
2941 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2942 if (this_mr != mr || xlat != base + done) {
2943 break;
2944 }
aliguori6d16c2f2009-01-22 16:59:11 +00002945 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002946
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002947 memory_region_ref(mr);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002948 *plen = done;
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002949 ptr = qemu_ram_ptr_length(mr->ram_block, base, plen);
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01002950 rcu_read_unlock();
2951
2952 return ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00002953}
2954
Avi Kivityac1970f2012-10-03 16:22:53 +02002955/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002956 * Will also mark the memory as dirty if is_write == 1. access_len gives
2957 * the amount of memory that was actually read or written by the caller.
2958 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002959void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2960 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002961{
2962 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002963 MemoryRegion *mr;
2964 ram_addr_t addr1;
2965
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01002966 mr = memory_region_from_host(buffer, &addr1);
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002967 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00002968 if (is_write) {
Paolo Bonzini845b6212015-03-23 11:45:53 +01002969 invalidate_and_set_dirty(mr, addr1, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002970 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002971 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002972 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002973 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002974 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00002975 return;
2976 }
2977 if (is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002978 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
2979 bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002980 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002981 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002982 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002983 memory_region_unref(bounce.mr);
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002984 atomic_mb_set(&bounce.in_use, false);
aliguoriba223c22009-01-22 16:59:16 +00002985 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002986}
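/*
 * Usage sketch, not part of the original file: the canonical
 * map/copy/unmap pattern for writing into guest memory. If the target is
 * not direct RAM, the pointer is the shared bounce buffer and *plen may
 * shrink, so a partial mapping must be continued (or retried) by the
 * caller.
 */
static bool example_dma_write(AddressSpace *as, hwaddr addr,
                              const uint8_t *data, hwaddr size)
{
    hwaddr plen = size;
    void *ptr = address_space_map(as, addr, &plen, true);

    if (!ptr) {
        return false; /* resources exhausted; see the map clients above */
    }
    memcpy(ptr, data, plen);
    /* access_len == plen, so everything written is marked dirty. */
    address_space_unmap(as, ptr, plen, true, plen);
    return plen == size;
}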
bellardd0ecd2a2006-04-23 17:14:48 +00002987
Avi Kivitya8170e52012-10-23 12:30:10 +02002988void *cpu_physical_memory_map(hwaddr addr,
2989 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002990 int is_write)
2991{
2992 return address_space_map(&address_space_memory, addr, plen, is_write);
2993}
2994
Avi Kivitya8170e52012-10-23 12:30:10 +02002995void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2996 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002997{
2998 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2999}
3000
bellard8df1cd02005-01-28 22:37:22 +00003001/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003002static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
3003 MemTxAttrs attrs,
3004 MemTxResult *result,
3005 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003006{
bellard8df1cd02005-01-28 22:37:22 +00003007 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02003008 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003009 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003010 hwaddr l = 4;
3011 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003012 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003013 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003014
Paolo Bonzini41063e12015-03-18 14:21:43 +01003015 rcu_read_lock();
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003016 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003017 if (l < 4 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003018 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003019
bellard8df1cd02005-01-28 22:37:22 +00003020 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003021 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003022#if defined(TARGET_WORDS_BIGENDIAN)
3023 if (endian == DEVICE_LITTLE_ENDIAN) {
3024 val = bswap32(val);
3025 }
3026#else
3027 if (endian == DEVICE_BIG_ENDIAN) {
3028 val = bswap32(val);
3029 }
3030#endif
bellard8df1cd02005-01-28 22:37:22 +00003031 } else {
3032 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003033 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003034 switch (endian) {
3035 case DEVICE_LITTLE_ENDIAN:
3036 val = ldl_le_p(ptr);
3037 break;
3038 case DEVICE_BIG_ENDIAN:
3039 val = ldl_be_p(ptr);
3040 break;
3041 default:
3042 val = ldl_p(ptr);
3043 break;
3044 }
Peter Maydell50013112015-04-26 16:49:24 +01003045 r = MEMTX_OK;
3046 }
3047 if (result) {
3048 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003049 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003050 if (release_lock) {
3051 qemu_mutex_unlock_iothread();
3052 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003053 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003054 return val;
3055}
3056
Peter Maydell50013112015-04-26 16:49:24 +01003057uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
3058 MemTxAttrs attrs, MemTxResult *result)
3059{
3060 return address_space_ldl_internal(as, addr, attrs, result,
3061 DEVICE_NATIVE_ENDIAN);
3062}
3063
3064uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
3065 MemTxAttrs attrs, MemTxResult *result)
3066{
3067 return address_space_ldl_internal(as, addr, attrs, result,
3068 DEVICE_LITTLE_ENDIAN);
3069}
3070
3071uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
3072 MemTxAttrs attrs, MemTxResult *result)
3073{
3074 return address_space_ldl_internal(as, addr, attrs, result,
3075 DEVICE_BIG_ENDIAN);
3076}
3077
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003078uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003079{
Peter Maydell50013112015-04-26 16:49:24 +01003080 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003081}
3082
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003083uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003084{
Peter Maydell50013112015-04-26 16:49:24 +01003085 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003086}
3087
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003088uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003089{
Peter Maydell50013112015-04-26 16:49:24 +01003090 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003091}
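/*
 * Usage sketch, not part of the original file: the ldl_*_phys() wrappers
 * above discard the MemTxResult, so callers that care about bus errors
 * use the address_space_ldl*() form directly, along these lines.
 */
static uint32_t example_checked_ldl(AddressSpace *as, hwaddr addr, bool *ok)
{
    MemTxResult r;
    uint32_t val = address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, &r);

    *ok = (r == MEMTX_OK); /* MEMTX_OK means no device signalled an error */
    return val;
}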
3092
bellard84b7b8e2005-11-28 21:19:04 +00003093/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003094static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
3095 MemTxAttrs attrs,
3096 MemTxResult *result,
3097 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00003098{
bellard84b7b8e2005-11-28 21:19:04 +00003099 uint8_t *ptr;
3100 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003101 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003102 hwaddr l = 8;
3103 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003104 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003105 bool release_lock = false;
bellard84b7b8e2005-11-28 21:19:04 +00003106
Paolo Bonzini41063e12015-03-18 14:21:43 +01003107 rcu_read_lock();
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003108 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003109 false);
3110 if (l < 8 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003111 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003112
bellard84b7b8e2005-11-28 21:19:04 +00003113 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003114 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
Paolo Bonzini968a5622013-05-24 17:58:37 +02003115#if defined(TARGET_WORDS_BIGENDIAN)
3116 if (endian == DEVICE_LITTLE_ENDIAN) {
3117 val = bswap64(val);
3118 }
3119#else
3120 if (endian == DEVICE_BIG_ENDIAN) {
3121 val = bswap64(val);
3122 }
3123#endif
bellard84b7b8e2005-11-28 21:19:04 +00003124 } else {
3125 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003126 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003127 switch (endian) {
3128 case DEVICE_LITTLE_ENDIAN:
3129 val = ldq_le_p(ptr);
3130 break;
3131 case DEVICE_BIG_ENDIAN:
3132 val = ldq_be_p(ptr);
3133 break;
3134 default:
3135 val = ldq_p(ptr);
3136 break;
3137 }
Peter Maydell50013112015-04-26 16:49:24 +01003138 r = MEMTX_OK;
3139 }
3140 if (result) {
3141 *result = r;
bellard84b7b8e2005-11-28 21:19:04 +00003142 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003143 if (release_lock) {
3144 qemu_mutex_unlock_iothread();
3145 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003146 rcu_read_unlock();
bellard84b7b8e2005-11-28 21:19:04 +00003147 return val;
3148}
3149
Peter Maydell50013112015-04-26 16:49:24 +01003150uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
3151 MemTxAttrs attrs, MemTxResult *result)
3152{
3153 return address_space_ldq_internal(as, addr, attrs, result,
3154 DEVICE_NATIVE_ENDIAN);
3155}
3156
3157uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
3158 MemTxAttrs attrs, MemTxResult *result)
3159{
3160 return address_space_ldq_internal(as, addr, attrs, result,
3161 DEVICE_LITTLE_ENDIAN);
3162}
3163
3164uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
3165 MemTxAttrs attrs, MemTxResult *result)
3166{
3167 return address_space_ldq_internal(as, addr, attrs, result,
3168 DEVICE_BIG_ENDIAN);
3169}
3170
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003171uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003172{
Peter Maydell50013112015-04-26 16:49:24 +01003173 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003174}
3175
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003176uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003177{
Peter Maydell50013112015-04-26 16:49:24 +01003178 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003179}
3180
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003181uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003182{
Peter Maydell50013112015-04-26 16:49:24 +01003183 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003184}
3185
bellardaab33092005-10-30 20:48:42 +00003186/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003187uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
3188 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003189{
3190 uint8_t val;
Peter Maydell50013112015-04-26 16:49:24 +01003191 MemTxResult r;
3192
3193 r = address_space_rw(as, addr, attrs, &val, 1, 0);
3194 if (result) {
3195 *result = r;
3196 }
bellardaab33092005-10-30 20:48:42 +00003197 return val;
3198}
3199
Peter Maydell50013112015-04-26 16:49:24 +01003200uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
3201{
3202 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3203}
3204
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003205/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003206static inline uint32_t address_space_lduw_internal(AddressSpace *as,
3207 hwaddr addr,
3208 MemTxAttrs attrs,
3209 MemTxResult *result,
3210 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003211{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003212 uint8_t *ptr;
3213 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003214 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003215 hwaddr l = 2;
3216 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003217 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003218 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003219
Paolo Bonzini41063e12015-03-18 14:21:43 +01003220 rcu_read_lock();
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003221 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003222 false);
3223 if (l < 2 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003224 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003225
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003226 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003227 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003228#if defined(TARGET_WORDS_BIGENDIAN)
3229 if (endian == DEVICE_LITTLE_ENDIAN) {
3230 val = bswap16(val);
3231 }
3232#else
3233 if (endian == DEVICE_BIG_ENDIAN) {
3234 val = bswap16(val);
3235 }
3236#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003237 } else {
3238 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003239 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003240 switch (endian) {
3241 case DEVICE_LITTLE_ENDIAN:
3242 val = lduw_le_p(ptr);
3243 break;
3244 case DEVICE_BIG_ENDIAN:
3245 val = lduw_be_p(ptr);
3246 break;
3247 default:
3248 val = lduw_p(ptr);
3249 break;
3250 }
Peter Maydell50013112015-04-26 16:49:24 +01003251 r = MEMTX_OK;
3252 }
3253 if (result) {
3254 *result = r;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003255 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003256 if (release_lock) {
3257 qemu_mutex_unlock_iothread();
3258 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003259 rcu_read_unlock();
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003260 return val;
bellardaab33092005-10-30 20:48:42 +00003261}
3262
Peter Maydell50013112015-04-26 16:49:24 +01003263uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
3264 MemTxAttrs attrs, MemTxResult *result)
3265{
3266 return address_space_lduw_internal(as, addr, attrs, result,
3267 DEVICE_NATIVE_ENDIAN);
3268}
3269
3270uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
3271 MemTxAttrs attrs, MemTxResult *result)
3272{
3273 return address_space_lduw_internal(as, addr, attrs, result,
3274 DEVICE_LITTLE_ENDIAN);
3275}
3276
3277uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
3278 MemTxAttrs attrs, MemTxResult *result)
3279{
3280 return address_space_lduw_internal(as, addr, attrs, result,
3281 DEVICE_BIG_ENDIAN);
3282}
3283
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003284uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003285{
Peter Maydell50013112015-04-26 16:49:24 +01003286 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003287}
3288
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003289uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003290{
Peter Maydell50013112015-04-26 16:49:24 +01003291 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003292}
3293
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003294uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003295{
Peter Maydell50013112015-04-26 16:49:24 +01003296 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003297}
3298
bellard8df1cd02005-01-28 22:37:22 +00003299/* warning: addr must be aligned. The RAM page is not marked as dirty
3300 and the code inside is not invalidated. This is useful when the dirty
3301 bits are used to track modified PTEs */
Peter Maydell50013112015-04-26 16:49:24 +01003302void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3303 MemTxAttrs attrs, MemTxResult *result)
bellard8df1cd02005-01-28 22:37:22 +00003304{
bellard8df1cd02005-01-28 22:37:22 +00003305 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003306 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003307 hwaddr l = 4;
3308 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003309 MemTxResult r;
Paolo Bonzini845b6212015-03-23 11:45:53 +01003310 uint8_t dirty_log_mask;
Jan Kiszka4840f102015-06-18 18:47:22 +02003311 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003312
Paolo Bonzini41063e12015-03-18 14:21:43 +01003313 rcu_read_lock();
Edgar E. Iglesias2198a122013-11-28 10:13:41 +01003314 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003315 true);
3316 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003317 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003318
Peter Maydell50013112015-04-26 16:49:24 +01003319 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003320 } else {
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003321 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
bellard8df1cd02005-01-28 22:37:22 +00003322 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003323
Paolo Bonzini845b6212015-03-23 11:45:53 +01003324 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3325 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003326 cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
3327 4, dirty_log_mask);
Peter Maydell50013112015-04-26 16:49:24 +01003328 r = MEMTX_OK;
3329 }
3330 if (result) {
3331 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003332 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003333 if (release_lock) {
3334 qemu_mutex_unlock_iothread();
3335 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003336 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003337}
3338
Peter Maydell50013112015-04-26 16:49:24 +01003339void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3340{
3341 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3342}
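/*
 * Usage sketch, not part of the original file: the classic caller of
 * stl_phys_notdirty() is target MMU emulation updating accessed/dirty
 * bits in a guest page table entry. The PTE layout and bit position are
 * invented; only the notdirty store matters here.
 */
static void example_set_pte_accessed(AddressSpace *as, hwaddr pte_addr)
{
    uint32_t pte = ldl_phys(as, pte_addr);

    pte |= 1u << 5; /* hypothetical "accessed" bit */
    /* Bookkeeping write: don't flag the page dirty or invalidate TBs. */
    stl_phys_notdirty(as, pte_addr, pte);
}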
3343
bellard8df1cd02005-01-28 22:37:22 +00003344/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003345static inline void address_space_stl_internal(AddressSpace *as,
3346 hwaddr addr, uint32_t val,
3347 MemTxAttrs attrs,
3348 MemTxResult *result,
3349 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003350{
bellard8df1cd02005-01-28 22:37:22 +00003351 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003352 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003353 hwaddr l = 4;
3354 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003355 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003356 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003357
Paolo Bonzini41063e12015-03-18 14:21:43 +01003358 rcu_read_lock();
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003359 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003360 true);
3361 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003362 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003363
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003364#if defined(TARGET_WORDS_BIGENDIAN)
3365 if (endian == DEVICE_LITTLE_ENDIAN) {
3366 val = bswap32(val);
3367 }
3368#else
3369 if (endian == DEVICE_BIG_ENDIAN) {
3370 val = bswap32(val);
3371 }
3372#endif
Peter Maydell50013112015-04-26 16:49:24 +01003373 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003374 } else {
bellard8df1cd02005-01-28 22:37:22 +00003375 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003376 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003377 switch (endian) {
3378 case DEVICE_LITTLE_ENDIAN:
3379 stl_le_p(ptr, val);
3380 break;
3381 case DEVICE_BIG_ENDIAN:
3382 stl_be_p(ptr, val);
3383 break;
3384 default:
3385 stl_p(ptr, val);
3386 break;
3387 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003388 invalidate_and_set_dirty(mr, addr1, 4);
Peter Maydell50013112015-04-26 16:49:24 +01003389 r = MEMTX_OK;
bellard8df1cd02005-01-28 22:37:22 +00003390 }
Peter Maydell50013112015-04-26 16:49:24 +01003391 if (result) {
3392 *result = r;
3393 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003394 if (release_lock) {
3395 qemu_mutex_unlock_iothread();
3396 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003397 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003398}
3399
3400void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3401 MemTxAttrs attrs, MemTxResult *result)
3402{
3403 address_space_stl_internal(as, addr, val, attrs, result,
3404 DEVICE_NATIVE_ENDIAN);
3405}
3406
3407void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3408 MemTxAttrs attrs, MemTxResult *result)
3409{
3410 address_space_stl_internal(as, addr, val, attrs, result,
3411 DEVICE_LITTLE_ENDIAN);
3412}
3413
3414void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3415 MemTxAttrs attrs, MemTxResult *result)
3416{
3417 address_space_stl_internal(as, addr, val, attrs, result,
3418 DEVICE_BIG_ENDIAN);
bellard8df1cd02005-01-28 22:37:22 +00003419}
3420
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003421void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003422{
Peter Maydell50013112015-04-26 16:49:24 +01003423 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003424}
3425
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003426void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003427{
Peter Maydell50013112015-04-26 16:49:24 +01003428 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003429}
3430
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003431void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003432{
Peter Maydell50013112015-04-26 16:49:24 +01003433 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003434}
3435
bellardaab33092005-10-30 20:48:42 +00003436/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003437void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3438 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003439{
3440 uint8_t v = val;
Peter Maydell50013112015-04-26 16:49:24 +01003441 MemTxResult r;
3442
3443 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3444 if (result) {
3445 *result = r;
3446 }
3447}
3448
3449void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3450{
3451 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003452}

/* warning: addr must be aligned */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}
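
/*
 * The store above takes one of two paths: if the translated region is
 * not directly accessible RAM (or the 2-byte access got clipped at a
 * region boundary), the value is byte-swapped into the target's native
 * order as needed and dispatched to the region's MMIO write handler;
 * otherwise it is written straight into host RAM with the endian-aware
 * stw_*_p() helpers and the page is marked dirty for migration, VGA and
 * self-modifying-code detection.
 */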

void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
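
/*
 * Illustrative sketch (hypothetical address and index): a device whose
 * layout is defined as little-endian writes a 16-bit field with the _le
 * variant, which byte-swaps on big-endian targets as needed:
 *
 *     stw_le_phys(&address_space_memory, ring_addr + 2, next_idx);
 *
 * address_space_stw()/stw_phys() use the target's native byte order
 * instead.
 */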

/* XXX: optimize */
void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = tswap64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_le64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_be64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}
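
/*
 * Unlike the 16- and 32-bit stores, the 64-bit variants above do not
 * dispatch to an MMIO handler directly: they byte-swap the value into
 * the wanted order on the stack and hand it to address_space_rw() as a
 * byte buffer, hence the "XXX: optimize" note.
 */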

void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        int asidx;
        MemTxAttrs attrs;

        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
        asidx = cpu_asidx_from_attrs(cpu, attrs);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
                                          phys_addr, buf, l);
        } else {
            address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
                             MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
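
/*
 * Illustrative sketch (as the gdbstub does; vaddr is hypothetical):
 * reading guest memory at a virtual address, page by page, with the MMU
 * translation handled for us:
 *
 *     uint8_t buf[16];
 *     if (cpu_memory_rw_debug(cpu, vaddr, buf, sizeof(buf), 0) < 0) {
 *         ... vaddr is not mapped by the guest MMU ...
 *     }
 */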

/*
 * Allows code that needs to deal with migration bitmaps etc. to still be
 * built target-independent.
 */
size_t qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}
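
/*
 * Illustrative sketch: target-independent migration code can derive the
 * page size and convert byte counts to page counts without ever seeing
 * TARGET_PAGE_SIZE (ram_bytes is hypothetical):
 *
 *     size_t page_size = (size_t)1 << qemu_target_page_bits();
 *     uint64_t pages = ram_bytes >> qemu_target_page_bits();
 */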

#endif

/*
 * A helper function for the _utterly broken_ virtio device model to find out
 * if it's running on a big-endian machine. Don't do this at home, kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
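
/*
 * Illustrative sketch: a legacy virtio device model deciding at runtime
 * which byte order the guest expects (val is hypothetical):
 *
 *     if (target_words_bigendian()) {
 *         val = cpu_to_be16(val);
 *     } else {
 *         val = cpu_to_le16(val);
 *     }
 *
 * As the comment above says: don't do this at home.
 */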

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}
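
/*
 * Illustrative sketch (as a crash-dump writer might do; paddr is
 * hypothetical): skip guest physical pages backed by device MMIO rather
 * than RAM or ROM:
 *
 *     if (cpu_physical_memory_is_io(paddr)) {
 *         continue;   /+ reading could trigger device side effects +/
 *     }
 */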

int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);
        if (ret) {
            break;
        }
    }
    rcu_read_unlock();
    return ret;
}
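
/*
 * Illustrative sketch: a RAMBlockIterFunc callback that logs each RAM
 * block; its parameters mirror the func(...) call above, and a non-zero
 * return value stops the iteration early:
 *
 *     static int dump_block(const char *idstr, void *host,
 *                           ram_addr_t offset, ram_addr_t length,
 *                           void *opaque)
 *     {
 *         fprintf(stderr, "%s: host %p offset 0x%" PRIx64 " len 0x%" PRIx64 "\n",
 *                 idstr, host, (uint64_t)offset, (uint64_t)length);
 *         return 0;
 *     }
 *
 *     qemu_ram_foreach_block(dump_block, NULL);
 */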
#endif