/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#ifndef _WIN32
#endif

#include "qemu/cutils.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "hw/qdev-core.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#include "hw/xen/xen.h"
#endif
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "hw/hw.h"
#include "exec/memory.h"
#include "exec/ioport.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "exec/log.h"

#include "migration/vmstate.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

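/* Number of radix-tree levels needed to cover the whole address space:
 * for example, with 4 KiB target pages (TARGET_PAGE_BITS == 12) this is
 * ((64 - 12 - 1) / 9) + 1 = 6 levels of 512-entry tables.
 */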
#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

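/* A subpage_t covers a single target page whose bytes are served by
 * different MemoryRegionSections: sub_section[] holds one section index
 * per byte offset within the page (see SUBPAGE_IDX below).
 */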
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif

#if !defined(CONFIG_USER_ONLY)

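/* Grow the node array so that at least @nodes more entries fit.  The
 * static alloc_hint remembers the size reached by previous dispatch
 * builds, avoiding a series of small reallocations on rebuild.
 */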
static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    static unsigned alloc_hint = 16;
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, alloc_hint);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
        alloc_hint = map->nodes_nb_alloc;
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

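/* Map the range of @nb target pages starting at @index to section @leaf,
 * allocating intermediate radix-tree nodes as needed.
 */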
static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return section->size.hi ||
           range_covers_byte(section->offset_within_address_space,
                             section->size.lo, addr);
}

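/* Walk the radix tree for @addr, consuming lp.skip levels at each
 * compacted node.  A NIL pointer, or a section that does not cover the
 * address, resolves to the unassigned section.
 */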
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = atomic_read(&d->mru_section);
    subpage_t *subpage;
    bool update;

    if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
        section_covers_addr(section, addr)) {
        update = false;
    } else {
        section = phys_page_find(d->phys_map, addr, d->map.nodes,
                                 d->map.sections);
        update = true;
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    if (update) {
        atomic_set(&d->mru_section, section);
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

/* Called from RCU critical section */
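/* Translate @addr in @as until a non-IOMMU memory region is reached,
 * narrowing *plen at each IOMMU step; a translation without the needed
 * permission resolves to io_mem_unassigned.
 */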
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
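/* Wire up @as as address space number @asidx of @cpu.  Index 0 also
 * becomes the cpu->as convenience pointer; KVM currently supports only
 * a single address space per CPU (asserted below).
 */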
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
#endif

static bool cpu_index_auto_assigned;

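/* When the board does not assign cpu_index explicitly, indexes are handed
 * out in creation order; cpu_exec_exit() below relies on this by only
 * allowing the last CPU in the list to be removed in that case.
 */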
static int cpu_get_free_index(void)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    cpu_index_auto_assigned = true;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

void cpu_exec_exit(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cpu_list_lock();
    if (!QTAILQ_IN_USE(cpu, node)) {
        /* there is nothing to undo since cpu_exec_init() hasn't been called */
        cpu_list_unlock();
        return;
    }

    assert(!(cpu_index_auto_assigned && cpu != QTAILQ_LAST(&cpus, CPUTailQ)));

    QTAILQ_REMOVE(&cpus, cpu, node);
    cpu->cpu_index = UNASSIGNED_CPU_INDEX;
    cpu_list_unlock();

    if (cc->vmsd != NULL) {
        vmstate_unregister(NULL, cc->vmsd, cpu);
    }
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
    }
}

void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc ATTRIBUTE_UNUSED = CPU_GET_CLASS(cpu);
    Error *local_err ATTRIBUTE_UNUSED = NULL;

    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();

    /* This is a softmmu CPU object, so create a property for it
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
                             (Object **)&cpu->memory,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));
#endif

    cpu_list_lock();
    if (cpu->cpu_index == UNASSIGNED_CPU_INDEX) {
        cpu->cpu_index = cpu_get_free_index();
        assert(cpu->cpu_index != UNASSIGNED_CPU_INDEX);
    } else {
        assert(!cpu_index_auto_assigned);
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
    cpu_list_unlock();

#ifndef CONFIG_USER_ONLY
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
    }
#endif
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    MemTxAttrs attrs;
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *     xxx removed from list
     *     rcu_read_lock()
     *     read mru_block
     *     mru_block = NULL;
     *     call_rcu(reclaim_ramblock, xxx);
     *     rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block.  */
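/* Returns true if any page in the range was dirty for @client.  The dirty
 * bitmap is split into DIRTY_MEMORY_BLOCK_SIZE chunks, so the bits are
 * tested and cleared one chunk at a time.
 */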
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    bool dirty = false;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
                                              offset, num);
        page += num;
    }

    rcu_read_unlock();

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}

/* Called from RCU critical section */
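/* For RAM the returned value is the ram_addr of the page ORed with one of
 * the special PHYS_SECTION_* indexes; for MMIO it is the section's index
 * in the dispatch map plus the offset within the region.  Pages covered
 * by a watchpoint are redirected to PHYS_SECTION_WATCH instead.
 */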
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
    qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

Avi Kivityac1970f2012-10-03 16:22:53 +02001133static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001134{
1135 subpage_t *subpage;
Avi Kivitya8170e52012-10-23 12:30:10 +02001136 hwaddr base = section->offset_within_address_space
Avi Kivity0f0cb162012-02-13 17:14:32 +02001137 & TARGET_PAGE_MASK;
Michael S. Tsirkin97115a82013-11-13 20:08:19 +02001138 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001139 d->map.nodes, d->map.sections);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001140 MemoryRegionSection subsection = {
1141 .offset_within_address_space = base,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001142 .size = int128_make64(TARGET_PAGE_SIZE),
Avi Kivity0f0cb162012-02-13 17:14:32 +02001143 };
Avi Kivitya8170e52012-10-23 12:30:10 +02001144 hwaddr start, end;
Avi Kivity0f0cb162012-02-13 17:14:32 +02001145
Avi Kivityf3705d52012-03-08 16:16:34 +02001146 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001147
Avi Kivityf3705d52012-03-08 16:16:34 +02001148 if (!(existing->mr->subpage)) {
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001149 subpage = subpage_init(d->as, base);
Edgar E. Iglesias3be91e82013-11-07 18:42:51 +01001150 subsection.address_space = d->as;
Avi Kivity0f0cb162012-02-13 17:14:32 +02001151 subsection.mr = &subpage->iomem;
Avi Kivityac1970f2012-10-03 16:22:53 +02001152 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001153 phys_section_add(&d->map, &subsection));
Avi Kivity0f0cb162012-02-13 17:14:32 +02001154 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02001155 subpage = container_of(existing->mr, subpage_t, iomem);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001156 }
1157 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001158 end = start + int128_get64(section->size) - 1;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001159 subpage_register(subpage, start, end,
1160 phys_section_add(&d->map, section));
Avi Kivity0f0cb162012-02-13 17:14:32 +02001161}
1162
1163
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001164static void register_multipage(AddressSpaceDispatch *d,
1165 MemoryRegionSection *section)
bellard33417e72003-08-10 21:47:01 +00001166{
Avi Kivitya8170e52012-10-23 12:30:10 +02001167 hwaddr start_addr = section->offset_within_address_space;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001168 uint16_t section_index = phys_section_add(&d->map, section);
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001169 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1170 TARGET_PAGE_BITS));
Avi Kivitydd811242012-01-02 12:17:03 +02001171
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001172 assert(num_pages);
1173 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
bellard33417e72003-08-10 21:47:01 +00001174}
1175
Avi Kivityac1970f2012-10-03 16:22:53 +02001176static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001177{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001178 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini00752702013-05-29 12:13:54 +02001179 AddressSpaceDispatch *d = as->next_dispatch;
Paolo Bonzini99b9cc02013-05-27 13:18:01 +02001180 MemoryRegionSection now = *section, remain = *section;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001181 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001182
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001183 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1184 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1185 - now.offset_within_address_space;
1186
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001187 now.size = int128_min(int128_make64(left), now.size);
Avi Kivityac1970f2012-10-03 16:22:53 +02001188 register_subpage(d, &now);
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001189 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001190 now.size = int128_zero();
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001191 }
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001192 while (int128_ne(remain.size, now.size)) {
1193 remain.size = int128_sub(remain.size, now.size);
1194 remain.offset_within_address_space += int128_get64(now.size);
1195 remain.offset_within_region += int128_get64(now.size);
Tyler Hall69b67642012-07-25 18:45:04 -04001196 now = remain;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001197 if (int128_lt(remain.size, page_size)) {
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001198 register_subpage(d, &now);
Hu Tao88266242013-08-29 18:21:16 +08001199 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001200 now.size = page_size;
Avi Kivityac1970f2012-10-03 16:22:53 +02001201 register_subpage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001202 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001203 now.size = int128_and(now.size, int128_neg(page_size));
Avi Kivityac1970f2012-10-03 16:22:53 +02001204 register_multipage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001205 }
Avi Kivity0f0cb162012-02-13 17:14:32 +02001206 }
1207}
1208
Sheng Yang62a27442010-01-26 19:21:16 +08001209void qemu_flush_coalesced_mmio_buffer(void)
1210{
1211 if (kvm_enabled())
1212 kvm_flush_coalesced_mmio_buffer();
1213}
1214
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001215void qemu_mutex_lock_ramlist(void)
1216{
1217 qemu_mutex_lock(&ram_list.mutex);
1218}
1219
1220void qemu_mutex_unlock_ramlist(void)
1221{
1222 qemu_mutex_unlock(&ram_list.mutex);
1223}
1224
Markus Armbrustere1e84ba2013-07-31 15:11:10 +02001225#ifdef __linux__
Alex Williamson04b16652010-07-02 11:13:17 -06001226static void *file_ram_alloc(RAMBlock *block,
1227 ram_addr_t memory,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001228 const char *path,
1229 Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001230{
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001231 bool unlink_on_error = false;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001232 char *filename;
Peter Feiner8ca761f2013-03-04 13:54:25 -05001233 char *sanitized_name;
1234 char *c;
Igor Mammedov056b68a2016-07-20 11:54:03 +02001235 void *area = MAP_FAILED;
Paolo Bonzini5c3ece72016-03-17 15:53:13 +01001236 int fd = -1;
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001237 int64_t page_size;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001238
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001239 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1240 error_setg(errp,
1241 "host lacks kvm mmu notifiers, -mem-path unsupported");
1242 return NULL;
1243 }
1244
1245 for (;;) {
1246 fd = open(path, O_RDWR);
1247 if (fd >= 0) {
1248 /* @path names an existing file, use it */
1249 break;
1250 }
1251 if (errno == ENOENT) {
1252 /* @path names a file that doesn't exist, create it */
1253 fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
1254 if (fd >= 0) {
1255 unlink_on_error = true;
1256 break;
1257 }
1258 } else if (errno == EISDIR) {
1259 /* @path names a directory, create a file there */
1260 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1261 sanitized_name = g_strdup(memory_region_name(block->mr));
1262 for (c = sanitized_name; *c != '\0'; c++) {
1263 if (*c == '/') {
1264 *c = '_';
1265 }
1266 }
1267
1268 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1269 sanitized_name);
1270 g_free(sanitized_name);
1271
1272 fd = mkstemp(filename);
1273 if (fd >= 0) {
1274 unlink(filename);
1275 g_free(filename);
1276 break;
1277 }
1278 g_free(filename);
1279 }
1280 if (errno != EEXIST && errno != EINTR) {
1281 error_setg_errno(errp, errno,
1282 "can't open backing store %s for guest RAM",
1283 path);
1284 goto error;
1285 }
1286 /*
1287 * Try again on EINTR and EEXIST. The latter happens when
1288 * something else creates the file between our two open().
1289 */
1290 }
1291
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001292 page_size = qemu_fd_getpagesize(fd);
Dominik Dingeld2f39ad2016-04-25 13:55:38 +02001293 block->mr->align = MAX(page_size, QEMU_VMALLOC_ALIGN);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001294
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001295 if (memory < page_size) {
Hu Tao557529d2014-09-09 13:28:00 +08001296 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001297 "or larger than page size 0x%" PRIx64,
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001298 memory, page_size);
Hu Tao557529d2014-09-09 13:28:00 +08001299 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001300 }
1301
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001302 memory = ROUND_UP(memory, page_size);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001303
1304 /*
1305 * ftruncate is not supported by hugetlbfs in older
1306 * hosts, so don't bother bailing out on errors.
1307 * If anything goes wrong with it under other filesystems,
1308 * mmap will fail.
1309 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001310 if (ftruncate(fd, memory)) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001311 perror("ftruncate");
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001312 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001313
Dominik Dingeld2f39ad2016-04-25 13:55:38 +02001314 area = qemu_ram_mmap(fd, memory, block->mr->align,
1315 block->flags & RAM_SHARED);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001316 if (area == MAP_FAILED) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001317 error_setg_errno(errp, errno,
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001318 "unable to map backing store for guest RAM");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001319 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001320 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001321
1322 if (mem_prealloc) {
Igor Mammedov056b68a2016-07-20 11:54:03 +02001323 os_mem_prealloc(fd, area, memory, errp);
1324 if (errp && *errp) {
1325 goto error;
1326 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001327 }
1328
Alex Williamson04b16652010-07-02 11:13:17 -06001329 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001330 return area;
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001331
1332error:
Igor Mammedov056b68a2016-07-20 11:54:03 +02001333 if (area != MAP_FAILED) {
1334 qemu_ram_munmap(area, memory);
1335 }
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001336 if (unlink_on_error) {
1337 unlink(path);
1338 }
Paolo Bonzini5c3ece72016-03-17 15:53:13 +01001339 if (fd != -1) {
1340 close(fd);
1341 }
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001342 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001343}
1344#endif
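
/*
 * Added note (not part of the original file): file_ram_alloc() above backs
 * file-based guest RAM such as hugetlbfs selected with -mem-path.  A typical
 * invocation that exercises this path, assuming hugepages are mounted at
 * /dev/hugepages, would be:
 *
 *     qemu-system-x86_64 -m 4G -mem-path /dev/hugepages -mem-prealloc ...
 *
 * With -mem-prealloc, os_mem_prealloc() touches every page at startup so
 * allocation failures surface immediately instead of at fault time.
 */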
1345
Mike Day0dc3f442013-09-05 14:41:35 -04001346/* Called with the ramlist lock held. */
Alex Williamsond17b5282010-06-25 11:08:38 -06001347static ram_addr_t find_ram_offset(ram_addr_t size)
1348{
Alex Williamson04b16652010-07-02 11:13:17 -06001349 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001350 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001351
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001352 assert(size != 0); /* it would hand out same offset multiple times */
1353
Mike Day0dc3f442013-09-05 14:41:35 -04001354 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
Alex Williamson04b16652010-07-02 11:13:17 -06001355 return 0;
Mike Day0d53d9f2015-01-21 13:45:24 +01001356 }
Alex Williamson04b16652010-07-02 11:13:17 -06001357
Mike Day0dc3f442013-09-05 14:41:35 -04001358 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001359 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001360
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001361 end = block->offset + block->max_length;
Alex Williamson04b16652010-07-02 11:13:17 -06001362
Mike Day0dc3f442013-09-05 14:41:35 -04001363 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001364 if (next_block->offset >= end) {
1365 next = MIN(next, next_block->offset);
1366 }
1367 }
1368 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001369 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001370 mingap = next - end;
1371 }
1372 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001373
1374 if (offset == RAM_ADDR_MAX) {
1375 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1376 (uint64_t)size);
1377 abort();
1378 }
1379
Alex Williamson04b16652010-07-02 11:13:17 -06001380 return offset;
1381}
1382
Juan Quintela652d7ec2012-07-20 10:37:54 +02001383ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001384{
Alex Williamsond17b5282010-06-25 11:08:38 -06001385 RAMBlock *block;
1386 ram_addr_t last = 0;
1387
Mike Day0dc3f442013-09-05 14:41:35 -04001388 rcu_read_lock();
1389 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001390 last = MAX(last, block->offset + block->max_length);
Mike Day0d53d9f2015-01-21 13:45:24 +01001391 }
Mike Day0dc3f442013-09-05 14:41:35 -04001392 rcu_read_unlock();
Alex Williamsond17b5282010-06-25 11:08:38 -06001393 return last;
1394}
1395
Jason Baronddb97f12012-08-02 15:44:16 -04001396static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1397{
1398 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001399
1400 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
Marcel Apfelbaum47c8ca52015-02-04 17:43:54 +02001401 if (!machine_dump_guest_core(current_machine)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001402 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1403 if (ret) {
1404 perror("qemu_madvise");
1405 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1406 "but dump_guest_core=off specified\n");
1407 }
1408 }
1409}
1410
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001411const char *qemu_ram_get_idstr(RAMBlock *rb)
1412{
1413 return rb->idstr;
1414}
1415
Mike Dayae3a7042013-09-05 14:41:35 -04001416/* Called with iothread lock held. */
Gongleifa53a0e2016-05-10 10:04:59 +08001417void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
Hu Tao20cfe882014-04-02 15:13:26 +08001418{
Gongleifa53a0e2016-05-10 10:04:59 +08001419 RAMBlock *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001420
Avi Kivityc5705a72011-12-20 15:59:12 +02001421 assert(new_block);
1422 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001423
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001424 if (dev) {
1425 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001426 if (id) {
1427 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001428 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001429 }
1430 }
1431 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1432
Gongleiab0a9952016-05-10 10:05:00 +08001433 rcu_read_lock();
Mike Day0dc3f442013-09-05 14:41:35 -04001434 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Gongleifa53a0e2016-05-10 10:04:59 +08001435 if (block != new_block &&
1436 !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001437 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1438 new_block->idstr);
1439 abort();
1440 }
1441 }
Mike Day0dc3f442013-09-05 14:41:35 -04001442 rcu_read_unlock();
Avi Kivityc5705a72011-12-20 15:59:12 +02001443}
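
/*
 * Added note (not part of the original file): device code does not usually
 * call qemu_ram_set_idstr() directly; it is normally reached through
 * vmstate_register_ram()/vmstate_register_ram_global(), which build the
 * idstr from the device's qdev path and the MemoryRegion name so that
 * migration can match RAM blocks between source and destination by name.
 */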
1444
Mike Dayae3a7042013-09-05 14:41:35 -04001445/* Called with iothread lock held. */
Gongleifa53a0e2016-05-10 10:04:59 +08001446void qemu_ram_unset_idstr(RAMBlock *block)
Hu Tao20cfe882014-04-02 15:13:26 +08001447{
Mike Dayae3a7042013-09-05 14:41:35 -04001448 /* FIXME: arch_init.c assumes that this is not called throughout
1449 * migration. Ignore the problem since hot-unplug during migration
1450 * does not work anyway.
1451 */
Hu Tao20cfe882014-04-02 15:13:26 +08001452 if (block) {
1453 memset(block->idstr, 0, sizeof(block->idstr));
1454 }
1455}
1456
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001457static int memory_try_enable_merging(void *addr, size_t len)
1458{
Marcel Apfelbaum75cc7f02015-02-04 17:43:55 +02001459 if (!machine_mem_merge(current_machine)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001460 /* disabled by the user */
1461 return 0;
1462 }
1463
1464 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1465}
1466
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001467/* Only legal before guest might have detected the memory size: e.g. on
1468 * incoming migration, or right after reset.
1469 *
1470 * As memory core doesn't know how is memory accessed, it is up to
1471 * resize callback to update device state and/or add assertions to detect
1472 * misuse, if necessary.
1473 */
Gongleifa53a0e2016-05-10 10:04:59 +08001474int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001475{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001476 assert(block);
1477
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001478 newsize = HOST_PAGE_ALIGN(newsize);
Michael S. Tsirkin129ddaf2015-02-17 10:15:30 +01001479
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001480 if (block->used_length == newsize) {
1481 return 0;
1482 }
1483
1484 if (!(block->flags & RAM_RESIZEABLE)) {
1485 error_setg_errno(errp, EINVAL,
1486 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1487 " in != 0x" RAM_ADDR_FMT, block->idstr,
1488 newsize, block->used_length);
1489 return -EINVAL;
1490 }
1491
1492 if (block->max_length < newsize) {
1493 error_setg_errno(errp, EINVAL,
1494 "Length too large: %s: 0x" RAM_ADDR_FMT
1495 " > 0x" RAM_ADDR_FMT, block->idstr,
1496 newsize, block->max_length);
1497 return -EINVAL;
1498 }
1499
1500 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1501 block->used_length = newsize;
Paolo Bonzini58d27072015-03-23 11:56:01 +01001502 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1503 DIRTY_CLIENTS_ALL);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001504 memory_region_set_size(block->mr, newsize);
1505 if (block->resized) {
1506 block->resized(block->idstr, newsize, block->host);
1507 }
1508 return 0;
1509}
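
/*
 * Added illustration (not part of the original file): a minimal sketch of a
 * resize request, e.g. when incoming migration reports a different size than
 * the destination created.  The block variable and new_size are hypothetical;
 * qemu_ram_resize() is the function defined above.
 *
 *     Error *local_err = NULL;
 *
 *     // Only blocks created with RAM_RESIZEABLE may change size, and only
 *     // up to their max_length.
 *     if (qemu_ram_resize(block, new_size, &local_err) < 0) {
 *         error_report_err(local_err);
 *     }
 *
 * On success the block's used_length is updated, the whole new range is
 * marked dirty, the owning MemoryRegion is resized and the block's
 * resized() callback (if any) is invoked.
 */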
1510
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001511/* Called with ram_list.mutex held */
1512static void dirty_memory_extend(ram_addr_t old_ram_size,
1513 ram_addr_t new_ram_size)
1514{
1515 ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
1516 DIRTY_MEMORY_BLOCK_SIZE);
1517 ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
1518 DIRTY_MEMORY_BLOCK_SIZE);
1519 int i;
1520
1521 /* Only need to extend if block count increased */
1522 if (new_num_blocks <= old_num_blocks) {
1523 return;
1524 }
1525
1526 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1527 DirtyMemoryBlocks *old_blocks;
1528 DirtyMemoryBlocks *new_blocks;
1529 int j;
1530
1531 old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
1532 new_blocks = g_malloc(sizeof(*new_blocks) +
1533 sizeof(new_blocks->blocks[0]) * new_num_blocks);
1534
1535 if (old_num_blocks) {
1536 memcpy(new_blocks->blocks, old_blocks->blocks,
1537 old_num_blocks * sizeof(old_blocks->blocks[0]));
1538 }
1539
1540 for (j = old_num_blocks; j < new_num_blocks; j++) {
1541 new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
1542 }
1543
1544 atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
1545
1546 if (old_blocks) {
1547 g_free_rcu(old_blocks, rcu);
1548 }
1549 }
1550}
1551
Fam Zheng528f46a2016-03-01 14:18:18 +08001552static void ram_block_add(RAMBlock *new_block, Error **errp)
Avi Kivityc5705a72011-12-20 15:59:12 +02001553{
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001554 RAMBlock *block;
Mike Day0d53d9f2015-01-21 13:45:24 +01001555 RAMBlock *last_block = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001556 ram_addr_t old_ram_size, new_ram_size;
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001557 Error *err = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001558
1559 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001560
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001561 qemu_mutex_lock_ramlist();
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001562 new_block->offset = find_ram_offset(new_block->max_length);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001563
1564 if (!new_block->host) {
1565 if (xen_enabled()) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001566 xen_ram_alloc(new_block->offset, new_block->max_length,
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001567 new_block->mr, &err);
1568 if (err) {
1569 error_propagate(errp, err);
1570 qemu_mutex_unlock_ramlist();
Paolo Bonzini39c350e2016-03-09 18:14:01 +01001571 return;
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001572 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001573 } else {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001574 new_block->host = phys_mem_alloc(new_block->max_length,
Igor Mammedova2b257d2014-10-31 16:38:37 +00001575 &new_block->mr->align);
Markus Armbruster39228252013-07-31 15:11:11 +02001576 if (!new_block->host) {
Hu Taoef701d72014-09-09 13:27:54 +08001577 error_setg_errno(errp, errno,
1578 "cannot set up guest memory '%s'",
1579 memory_region_name(new_block->mr));
1580 qemu_mutex_unlock_ramlist();
Paolo Bonzini39c350e2016-03-09 18:14:01 +01001581 return;
Markus Armbruster39228252013-07-31 15:11:11 +02001582 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001583 memory_try_enable_merging(new_block->host, new_block->max_length);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001584 }
1585 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001586
Li Zhijiandd631692015-07-02 20:18:06 +08001587 new_ram_size = MAX(old_ram_size,
1588 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1589 if (new_ram_size > old_ram_size) {
1590 migration_bitmap_extend(old_ram_size, new_ram_size);
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001591 dirty_memory_extend(old_ram_size, new_ram_size);
Li Zhijiandd631692015-07-02 20:18:06 +08001592 }
Mike Day0d53d9f2015-01-21 13:45:24 +01001593 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1594 * QLIST (which has an RCU-friendly variant) does not have insertion at
1595 * tail, so save the last element in last_block.
1596 */
Mike Day0dc3f442013-09-05 14:41:35 -04001597 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Mike Day0d53d9f2015-01-21 13:45:24 +01001598 last_block = block;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001599 if (block->max_length < new_block->max_length) {
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001600 break;
1601 }
1602 }
1603 if (block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001604 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001605 } else if (last_block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001606 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001607 } else { /* list is empty */
Mike Day0dc3f442013-09-05 14:41:35 -04001608 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001609 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001610 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001611
Mike Day0dc3f442013-09-05 14:41:35 -04001612 /* Write list before version */
1613 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001614 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001615 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001616
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001617 cpu_physical_memory_set_dirty_range(new_block->offset,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001618 new_block->used_length,
1619 DIRTY_CLIENTS_ALL);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001620
Paolo Bonzinia904c912015-01-21 16:18:35 +01001621 if (new_block->host) {
1622 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1623 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
Cao jinc2cd6272016-09-12 14:34:56 +08001624 /* MADV_DONTFORK is also needed by KVM in the absence of a synchronous MMU */
Paolo Bonzinia904c912015-01-21 16:18:35 +01001625 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001626 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001627}
1628
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001629#ifdef __linux__
Fam Zheng528f46a2016-03-01 14:18:18 +08001630RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
1631 bool share, const char *mem_path,
1632 Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001633{
1634 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001635 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001636
1637 if (xen_enabled()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001638 error_setg(errp, "-mem-path not supported with Xen");
Fam Zheng528f46a2016-03-01 14:18:18 +08001639 return NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001640 }
1641
1642 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1643 /*
1644 * file_ram_alloc() needs to allocate just like
1645 * phys_mem_alloc, but we haven't bothered to provide
1646 * a hook there.
1647 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001648 error_setg(errp,
1649 "-mem-path not supported with this accelerator");
Fam Zheng528f46a2016-03-01 14:18:18 +08001650 return NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001651 }
1652
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001653 size = HOST_PAGE_ALIGN(size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001654 new_block = g_malloc0(sizeof(*new_block));
1655 new_block->mr = mr;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001656 new_block->used_length = size;
1657 new_block->max_length = size;
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001658 new_block->flags = share ? RAM_SHARED : 0;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001659 new_block->host = file_ram_alloc(new_block, size,
1660 mem_path, errp);
1661 if (!new_block->host) {
1662 g_free(new_block);
Fam Zheng528f46a2016-03-01 14:18:18 +08001663 return NULL;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001664 }
1665
Fam Zheng528f46a2016-03-01 14:18:18 +08001666 ram_block_add(new_block, &local_err);
Hu Taoef701d72014-09-09 13:27:54 +08001667 if (local_err) {
1668 g_free(new_block);
1669 error_propagate(errp, local_err);
Fam Zheng528f46a2016-03-01 14:18:18 +08001670 return NULL;
Hu Taoef701d72014-09-09 13:27:54 +08001671 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001672 return new_block;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001673}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001674#endif
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001675
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001676static
Fam Zheng528f46a2016-03-01 14:18:18 +08001677RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1678 void (*resized)(const char*,
1679 uint64_t length,
1680 void *host),
1681 void *host, bool resizeable,
1682 MemoryRegion *mr, Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001683{
1684 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001685 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001686
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001687 size = HOST_PAGE_ALIGN(size);
1688 max_size = HOST_PAGE_ALIGN(max_size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001689 new_block = g_malloc0(sizeof(*new_block));
1690 new_block->mr = mr;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001691 new_block->resized = resized;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001692 new_block->used_length = size;
1693 new_block->max_length = max_size;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001694 assert(max_size >= size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001695 new_block->fd = -1;
1696 new_block->host = host;
1697 if (host) {
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001698 new_block->flags |= RAM_PREALLOC;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001699 }
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001700 if (resizeable) {
1701 new_block->flags |= RAM_RESIZEABLE;
1702 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001703 ram_block_add(new_block, &local_err);
Hu Taoef701d72014-09-09 13:27:54 +08001704 if (local_err) {
1705 g_free(new_block);
1706 error_propagate(errp, local_err);
Fam Zheng528f46a2016-03-01 14:18:18 +08001707 return NULL;
Hu Taoef701d72014-09-09 13:27:54 +08001708 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001709 return new_block;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001710}
1711
Fam Zheng528f46a2016-03-01 14:18:18 +08001712RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001713 MemoryRegion *mr, Error **errp)
1714{
1715 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1716}
1717
Fam Zheng528f46a2016-03-01 14:18:18 +08001718RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
pbrook94a6b542009-04-11 17:15:54 +00001719{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001720 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1721}
1722
Fam Zheng528f46a2016-03-01 14:18:18 +08001723RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001724 void (*resized)(const char*,
1725 uint64_t length,
1726 void *host),
1727 MemoryRegion *mr, Error **errp)
1728{
1729 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
pbrook94a6b542009-04-11 17:15:54 +00001730}
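
/*
 * Added illustration (not part of the original file): a hedged sketch of the
 * allocation entry points above.  The MemoryRegion 'mr', the sizes and the
 * callback are hypothetical; only the qemu_ram_alloc*() signatures come from
 * this file.
 *
 *     static void my_resized_cb(const char *id, uint64_t length, void *host)
 *     {
 *         // Device-specific reaction to a completed resize.
 *     }
 *
 *     // Fixed-size block, aborting on failure:
 *     RAMBlock *rb = qemu_ram_alloc(16 * 1024 * 1024, mr, &error_fatal);
 *
 *     // Growable block: starts at 16 MiB, may later grow up to 64 MiB via
 *     // qemu_ram_resize(), which then calls my_resized_cb().
 *     RAMBlock *grb = qemu_ram_alloc_resizeable(16 * 1024 * 1024,
 *                                               64 * 1024 * 1024,
 *                                               my_resized_cb, mr,
 *                                               &error_fatal);
 */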
bellarde9a1ab12007-02-08 23:08:38 +00001731
Paolo Bonzini43771532013-09-09 17:58:40 +02001732static void reclaim_ramblock(RAMBlock *block)
1733{
1734 if (block->flags & RAM_PREALLOC) {
1735 ;
1736 } else if (xen_enabled()) {
1737 xen_invalidate_map_cache_entry(block->host);
1738#ifndef _WIN32
1739 } else if (block->fd >= 0) {
Eduardo Habkost2f3a2bb2015-11-06 20:11:21 -02001740 qemu_ram_munmap(block->host, block->max_length);
Paolo Bonzini43771532013-09-09 17:58:40 +02001741 close(block->fd);
1742#endif
1743 } else {
1744 qemu_anon_ram_free(block->host, block->max_length);
1745 }
1746 g_free(block);
1747}
1748
Fam Zhengf1060c52016-03-01 14:18:22 +08001749void qemu_ram_free(RAMBlock *block)
bellarde9a1ab12007-02-08 23:08:38 +00001750{
Marc-André Lureau85bc2a12016-03-29 13:20:51 +02001751 if (!block) {
1752 return;
1753 }
1754
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001755 qemu_mutex_lock_ramlist();
Fam Zhengf1060c52016-03-01 14:18:22 +08001756 QLIST_REMOVE_RCU(block, next);
1757 ram_list.mru_block = NULL;
1758 /* Write list before version */
1759 smp_wmb();
1760 ram_list.version++;
1761 call_rcu(block, reclaim_ramblock, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001762 qemu_mutex_unlock_ramlist();
bellarde9a1ab12007-02-08 23:08:38 +00001763}
1764
Huang Yingcd19cfa2011-03-02 08:56:19 +01001765#ifndef _WIN32
1766void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1767{
1768 RAMBlock *block;
1769 ram_addr_t offset;
1770 int flags;
1771 void *area, *vaddr;
1772
Mike Day0dc3f442013-09-05 14:41:35 -04001773 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001774 offset = addr - block->offset;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001775 if (offset < block->max_length) {
Michael S. Tsirkin1240be22014-11-12 11:44:41 +02001776 vaddr = ramblock_ptr(block, offset);
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001777 if (block->flags & RAM_PREALLOC) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001778 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001779 } else if (xen_enabled()) {
1780 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001781 } else {
1782 flags = MAP_FIXED;
Markus Armbruster3435f392013-07-31 15:11:07 +02001783 if (block->fd >= 0) {
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001784 flags |= (block->flags & RAM_SHARED ?
1785 MAP_SHARED : MAP_PRIVATE);
Markus Armbruster3435f392013-07-31 15:11:07 +02001786 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1787 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001788 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001789 /*
1790 * Remap needs to match alloc. Accelerators that
1791 * set phys_mem_alloc never remap. If they did,
1792 * we'd need a remap hook here.
1793 */
1794 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1795
Huang Yingcd19cfa2011-03-02 08:56:19 +01001796 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1797 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1798 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001799 }
1800 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001801 fprintf(stderr, "Could not remap addr: "
1802 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001803 length, addr);
1804 exit(1);
1805 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001806 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001807 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001808 }
Huang Yingcd19cfa2011-03-02 08:56:19 +01001809 }
1810 }
1811}
1812#endif /* !_WIN32 */
1813
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001814/* Return a host pointer to ram allocated with qemu_ram_alloc.
Mike Dayae3a7042013-09-05 14:41:35 -04001815 * This should not be used for general purpose DMA. Use address_space_map
1816 * or address_space_rw instead. For local memory (e.g. video ram) that the
1817 * device owns, use memory_region_get_ram_ptr.
Mike Day0dc3f442013-09-05 14:41:35 -04001818 *
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001819 * Called within RCU critical section.
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001820 */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001821void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001822{
Gonglei3655cb92016-02-20 10:35:20 +08001823 RAMBlock *block = ram_block;
1824
1825 if (block == NULL) {
1826 block = qemu_get_ram_block(addr);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001827 addr -= block->offset;
Gonglei3655cb92016-02-20 10:35:20 +08001828 }
Mike Dayae3a7042013-09-05 14:41:35 -04001829
1830 if (xen_enabled() && block->host == NULL) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001831 /* We need to check if the requested address is in the RAM
1832 * because we don't want to map the entire memory in QEMU.
1833 * In that case just map until the end of the page.
1834 */
1835 if (block->offset == 0) {
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001836 return xen_map_cache(addr, 0, 0);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001837 }
Mike Dayae3a7042013-09-05 14:41:35 -04001838
1839 block->host = xen_map_cache(block->offset, block->max_length, 1);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001840 }
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001841 return ramblock_ptr(block, addr);
pbrookdc828ca2009-04-09 22:21:07 +00001842}
1843
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001844/* Return a host pointer to guest's ram. Similar to qemu_map_ram_ptr
Mike Dayae3a7042013-09-05 14:41:35 -04001845 * but takes a size argument.
Mike Day0dc3f442013-09-05 14:41:35 -04001846 *
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001847 * Called within RCU critical section.
Mike Dayae3a7042013-09-05 14:41:35 -04001848 */
Gonglei3655cb92016-02-20 10:35:20 +08001849static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
1850 hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001851{
Gonglei3655cb92016-02-20 10:35:20 +08001852 RAMBlock *block = ram_block;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001853 if (*size == 0) {
1854 return NULL;
1855 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001856
Gonglei3655cb92016-02-20 10:35:20 +08001857 if (block == NULL) {
1858 block = qemu_get_ram_block(addr);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001859 addr -= block->offset;
Gonglei3655cb92016-02-20 10:35:20 +08001860 }
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001861 *size = MIN(*size, block->max_length - addr);
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001862
1863 if (xen_enabled() && block->host == NULL) {
1864 /* We need to check if the requested address is in the RAM
1865 * because we don't want to map the entire memory in QEMU.
1866 * In that case just map the requested area.
1867 */
1868 if (block->offset == 0) {
1869 return xen_map_cache(addr, *size, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001870 }
1871
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001872 block->host = xen_map_cache(block->offset, block->max_length, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001873 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001874
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001875 return ramblock_ptr(block, addr);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001876}
1877
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001878/*
1879 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1880 * in that RAMBlock.
1881 *
1882 * ptr: Host pointer to look up
1883 * round_offset: If true round the result offset down to a page boundary
1884 * *ram_addr: set to result ram_addr
1885 * *offset: set to result offset within the RAMBlock
1886 *
1887 * Returns: RAMBlock (or NULL if not found)
Mike Dayae3a7042013-09-05 14:41:35 -04001888 *
1889 * By the time this function returns, the returned pointer is not protected
1890 * by RCU anymore. If the caller is not within an RCU critical section and
1891 * does not hold the iothread lock, it must have other means of protecting the
1892 * pointer, such as a reference to the region that includes the incoming
1893 * ram_addr_t.
1894 */
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001895RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001896 ram_addr_t *offset)
pbrook5579c7f2009-04-11 14:47:08 +00001897{
pbrook94a6b542009-04-11 17:15:54 +00001898 RAMBlock *block;
1899 uint8_t *host = ptr;
1900
Jan Kiszka868bb332011-06-21 22:59:09 +02001901 if (xen_enabled()) {
Paolo Bonzinif615f392016-05-26 10:07:50 +02001902 ram_addr_t ram_addr;
Mike Day0dc3f442013-09-05 14:41:35 -04001903 rcu_read_lock();
Paolo Bonzinif615f392016-05-26 10:07:50 +02001904 ram_addr = xen_ram_addr_from_mapcache(ptr);
1905 block = qemu_get_ram_block(ram_addr);
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001906 if (block) {
Anthony PERARDd6b6aec2016-06-09 16:56:17 +01001907 *offset = ram_addr - block->offset;
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001908 }
Mike Day0dc3f442013-09-05 14:41:35 -04001909 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001910 return block;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001911 }
1912
Mike Day0dc3f442013-09-05 14:41:35 -04001913 rcu_read_lock();
1914 block = atomic_rcu_read(&ram_list.mru_block);
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001915 if (block && block->host && host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001916 goto found;
1917 }
1918
Mike Day0dc3f442013-09-05 14:41:35 -04001919 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001920 /* This case happens when the block is not mapped. */
1921 if (block->host == NULL) {
1922 continue;
1923 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001924 if (host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001925 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001926 }
pbrook94a6b542009-04-11 17:15:54 +00001927 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001928
Mike Day0dc3f442013-09-05 14:41:35 -04001929 rcu_read_unlock();
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001930 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001931
1932found:
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001933 *offset = (host - block->host);
1934 if (round_offset) {
1935 *offset &= TARGET_PAGE_MASK;
1936 }
Mike Day0dc3f442013-09-05 14:41:35 -04001937 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001938 return block;
1939}
1940
Dr. David Alan Gilberte3dd7492015-11-05 18:10:33 +00001941/*
1942 * Finds the named RAMBlock
1943 *
1944 * name: The name of RAMBlock to find
1945 *
1946 * Returns: RAMBlock (or NULL if not found)
1947 */
1948RAMBlock *qemu_ram_block_by_name(const char *name)
1949{
1950 RAMBlock *block;
1951
1952 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1953 if (!strcmp(name, block->idstr)) {
1954 return block;
1955 }
1956 }
1957
1958 return NULL;
1959}
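
/*
 * Added illustration (not part of the original file): lookup by idstr is how
 * incoming migration matches a block name received from the source.  The
 * caller is responsible for the RCU critical section; the block name used
 * here is only an example.
 *
 *     rcu_read_lock();
 *     RAMBlock *rb = qemu_ram_block_by_name("pc.ram");
 *     if (!rb) {
 *         error_report("unknown RAM block '%s'", "pc.ram");
 *     }
 *     rcu_read_unlock();
 */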
1960
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001961/* Some of the softmmu routines need to translate from a host pointer
1962 (typically a TLB entry) back to a ram offset. */
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01001963ram_addr_t qemu_ram_addr_from_host(void *ptr)
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001964{
1965 RAMBlock *block;
Paolo Bonzinif615f392016-05-26 10:07:50 +02001966 ram_addr_t offset;
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001967
Paolo Bonzinif615f392016-05-26 10:07:50 +02001968 block = qemu_ram_block_from_host(ptr, false, &offset);
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001969 if (!block) {
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01001970 return RAM_ADDR_INVALID;
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001971 }
1972
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01001973 return block->offset + offset;
Marcelo Tosattie8902612010-10-11 15:31:19 -03001974}
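
/*
 * Added illustration (not part of the original file): callers must check for
 * the RAM_ADDR_INVALID sentinel, since an arbitrary host pointer need not
 * fall inside any RAMBlock.
 *
 *     ram_addr_t ram_addr = qemu_ram_addr_from_host(host_ptr);
 *     if (ram_addr == RAM_ADDR_INVALID) {
 *         // host_ptr does not point into guest RAM; treat as MMIO or a bug.
 *     }
 */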
Alex Williamsonf471a172010-06-11 11:11:42 -06001975
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001976/* Called within RCU critical section. */
Avi Kivitya8170e52012-10-23 12:30:10 +02001977static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001978 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001979{
Juan Quintela52159192013-10-08 12:44:04 +02001980 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001981 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00001982 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001983 switch (size) {
1984 case 1:
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001985 stb_p(qemu_map_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001986 break;
1987 case 2:
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001988 stw_p(qemu_map_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001989 break;
1990 case 4:
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001991 stl_p(qemu_map_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001992 break;
1993 default:
1994 abort();
1995 }
Paolo Bonzini58d27072015-03-23 11:56:01 +01001996 /* Set both VGA and migration bits for simplicity and to remove
1997 * the notdirty callback faster.
1998 */
1999 cpu_physical_memory_set_dirty_range(ram_addr, size,
2000 DIRTY_CLIENTS_NOCODE);
bellardf23db162005-08-21 19:12:28 +00002001 /* we remove the notdirty callback only if the code has been
2002 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02002003 if (!cpu_physical_memory_is_clean(ram_addr)) {
Peter Crosthwaitebcae01e2015-09-10 22:39:42 -07002004 tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
Andreas Färber4917cf42013-05-27 05:17:50 +02002005 }
bellard1ccde1c2004-02-06 19:46:14 +00002006}
2007
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002008static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
2009 unsigned size, bool is_write)
2010{
2011 return is_write;
2012}
2013
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002014static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002015 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002016 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002017 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00002018};
2019
pbrook0f459d12008-06-09 00:20:13 +00002020/* Generate a debug exception if a watchpoint has been hit. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002021static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
pbrook0f459d12008-06-09 00:20:13 +00002022{
Andreas Färber93afead2013-08-26 03:41:01 +02002023 CPUState *cpu = current_cpu;
Sergey Fedorov568496c2016-02-11 11:17:32 +00002024 CPUClass *cc = CPU_GET_CLASS(cpu);
Andreas Färber93afead2013-08-26 03:41:01 +02002025 CPUArchState *env = cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00002026 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00002027 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00002028 CPUWatchpoint *wp;
Emilio G. Cota89fee742016-04-07 13:19:22 -04002029 uint32_t cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00002030
Andreas Färberff4700b2013-08-26 18:23:18 +02002031 if (cpu->watchpoint_hit) {
aliguori06d55cc2008-11-18 20:24:06 +00002032 /* We re-entered the check after replacing the TB. Now raise
2033 * the debug interrupt so that it will trigger after the
2034 * current instruction. */
Andreas Färber93afead2013-08-26 03:41:01 +02002035 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00002036 return;
2037 }
Andreas Färber93afead2013-08-26 03:41:01 +02002038 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Andreas Färberff4700b2013-08-26 18:23:18 +02002039 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Peter Maydell05068c02014-09-12 14:06:48 +01002040 if (cpu_watchpoint_address_matches(wp, vaddr, len)
2041 && (wp->flags & flags)) {
Peter Maydell08225672014-09-12 14:06:48 +01002042 if (flags == BP_MEM_READ) {
2043 wp->flags |= BP_WATCHPOINT_HIT_READ;
2044 } else {
2045 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
2046 }
2047 wp->hitaddr = vaddr;
Peter Maydell66b9b432015-04-26 16:49:24 +01002048 wp->hitattrs = attrs;
Andreas Färberff4700b2013-08-26 18:23:18 +02002049 if (!cpu->watchpoint_hit) {
Sergey Fedorov568496c2016-02-11 11:17:32 +00002050 if (wp->flags & BP_CPU &&
2051 !cc->debug_check_watchpoint(cpu, wp)) {
2052 wp->flags &= ~BP_WATCHPOINT_HIT;
2053 continue;
2054 }
Andreas Färberff4700b2013-08-26 18:23:18 +02002055 cpu->watchpoint_hit = wp;
Andreas Färber239c51a2013-09-01 17:12:23 +02002056 tb_check_watchpoint(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002057 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
Andreas Färber27103422013-08-26 08:31:06 +02002058 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +02002059 cpu_loop_exit(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002060 } else {
2061 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
Andreas Färber648f0342013-09-01 17:43:17 +02002062 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
Peter Maydell6886b982016-05-17 15:18:04 +01002063 cpu_loop_exit_noexc(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002064 }
aliguori06d55cc2008-11-18 20:24:06 +00002065 }
aliguori6e140f22008-11-18 20:37:55 +00002066 } else {
2067 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00002068 }
2069 }
2070}
2071
pbrook6658ffb2007-03-16 23:58:11 +00002072/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2073 so these check for a hit then pass through to the normal out-of-line
2074 phys routines. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002075static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
2076 unsigned size, MemTxAttrs attrs)
pbrook6658ffb2007-03-16 23:58:11 +00002077{
Peter Maydell66b9b432015-04-26 16:49:24 +01002078 MemTxResult res;
2079 uint64_t data;
Peter Maydell79ed0412016-01-21 14:15:06 +00002080 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2081 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
pbrook6658ffb2007-03-16 23:58:11 +00002082
Peter Maydell66b9b432015-04-26 16:49:24 +01002083 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
Avi Kivity1ec9b902012-01-02 12:47:48 +02002084 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04002085 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002086 data = address_space_ldub(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002087 break;
2088 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002089 data = address_space_lduw(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002090 break;
2091 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002092 data = address_space_ldl(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002093 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02002094 default: abort();
2095 }
Peter Maydell66b9b432015-04-26 16:49:24 +01002096 *pdata = data;
2097 return res;
2098}
2099
2100static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
2101 uint64_t val, unsigned size,
2102 MemTxAttrs attrs)
2103{
2104 MemTxResult res;
Peter Maydell79ed0412016-01-21 14:15:06 +00002105 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2106 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
Peter Maydell66b9b432015-04-26 16:49:24 +01002107
2108 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
2109 switch (size) {
2110 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002111 address_space_stb(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002112 break;
2113 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002114 address_space_stw(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002115 break;
2116 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002117 address_space_stl(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002118 break;
2119 default: abort();
2120 }
2121 return res;
pbrook6658ffb2007-03-16 23:58:11 +00002122}
2123
Avi Kivity1ec9b902012-01-02 12:47:48 +02002124static const MemoryRegionOps watch_mem_ops = {
Peter Maydell66b9b432015-04-26 16:49:24 +01002125 .read_with_attrs = watch_mem_read,
2126 .write_with_attrs = watch_mem_write,
Avi Kivity1ec9b902012-01-02 12:47:48 +02002127 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00002128};
pbrook6658ffb2007-03-16 23:58:11 +00002129
Peter Maydellf25a49e2015-04-26 16:49:24 +01002130static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2131 unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002132{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002133 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002134 uint8_t buf[8];
Peter Maydell5c9eb022015-04-26 16:49:24 +01002135 MemTxResult res;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002136
blueswir1db7b5422007-05-26 17:36:03 +00002137#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002138 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002139 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00002140#endif
Peter Maydell5c9eb022015-04-26 16:49:24 +01002141 res = address_space_read(subpage->as, addr + subpage->base,
2142 attrs, buf, len);
2143 if (res) {
2144 return res;
Peter Maydellf25a49e2015-04-26 16:49:24 +01002145 }
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002146 switch (len) {
2147 case 1:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002148 *data = ldub_p(buf);
2149 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002150 case 2:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002151 *data = lduw_p(buf);
2152 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002153 case 4:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002154 *data = ldl_p(buf);
2155 return MEMTX_OK;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002156 case 8:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002157 *data = ldq_p(buf);
2158 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002159 default:
2160 abort();
2161 }
blueswir1db7b5422007-05-26 17:36:03 +00002162}
2163
Peter Maydellf25a49e2015-04-26 16:49:24 +01002164static MemTxResult subpage_write(void *opaque, hwaddr addr,
2165 uint64_t value, unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002166{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002167 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002168 uint8_t buf[8];
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002169
blueswir1db7b5422007-05-26 17:36:03 +00002170#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002171 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002172 " value %"PRIx64"\n",
2173 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00002174#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002175 switch (len) {
2176 case 1:
2177 stb_p(buf, value);
2178 break;
2179 case 2:
2180 stw_p(buf, value);
2181 break;
2182 case 4:
2183 stl_p(buf, value);
2184 break;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002185 case 8:
2186 stq_p(buf, value);
2187 break;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002188 default:
2189 abort();
2190 }
Peter Maydell5c9eb022015-04-26 16:49:24 +01002191 return address_space_write(subpage->as, addr + subpage->base,
2192 attrs, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00002193}
2194
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002195static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08002196 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002197{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002198 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002199#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002200 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002201 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002202#endif
2203
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002204 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08002205 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002206}
2207
Avi Kivity70c68e42012-01-02 12:32:48 +02002208static const MemoryRegionOps subpage_ops = {
Peter Maydellf25a49e2015-04-26 16:49:24 +01002209 .read_with_attrs = subpage_read,
2210 .write_with_attrs = subpage_write,
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002211 .impl.min_access_size = 1,
2212 .impl.max_access_size = 8,
2213 .valid.min_access_size = 1,
2214 .valid.max_access_size = 8,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002215 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02002216 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00002217};
2218
Anthony Liguoric227f092009-10-01 16:12:16 -05002219static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002220 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00002221{
2222 int idx, eidx;
2223
2224 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2225 return -1;
2226 idx = SUBPAGE_IDX(start);
2227 eidx = SUBPAGE_IDX(end);
2228#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002229 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2230 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00002231#endif
blueswir1db7b5422007-05-26 17:36:03 +00002232 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02002233 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00002234 }
2235
2236 return 0;
2237}
2238
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002239static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00002240{
Anthony Liguoric227f092009-10-01 16:12:16 -05002241 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00002242
Anthony Liguori7267c092011-08-20 22:09:37 -05002243 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00002244
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002245 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00002246 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002247 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002248 NULL, TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02002249 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00002250#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002251 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2252 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00002253#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02002254 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00002255
2256 return mmio;
2257}
2258
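/* Add a catch-all section for @mr spanning the whole 64-bit address range
   and return its index in the section map. */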
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002259static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2260 MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02002261{
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002262 assert(as);
Avi Kivity5312bd82012-02-12 18:32:55 +02002263 MemoryRegionSection section = {
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002264 .address_space = as,
Avi Kivity5312bd82012-02-12 18:32:55 +02002265 .mr = mr,
2266 .offset_within_address_space = 0,
2267 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002268 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02002269 };
2270
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002271 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02002272}
2273
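/* Resolve the section index stored in an IOTLB entry back to its
   MemoryRegion, using the CPU address space selected by @attrs. */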
Peter Maydella54c87b2016-01-21 14:15:05 +00002274MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
Avi Kivityaa102232012-03-08 17:06:55 +02002275{
Peter Maydella54c87b2016-01-21 14:15:05 +00002276 int asidx = cpu_asidx_from_attrs(cpu, attrs);
2277 CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
Peter Maydell32857f42015-10-01 15:29:50 +01002278 AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002279 MemoryRegionSection *sections = d->map.sections;
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002280
2281 return sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02002282}
2283
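/* Create the special memory regions (ROM, unassigned, notdirty, watchpoint)
   backing the fixed PHYS_SECTION_* entries. */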
Avi Kivitye9179ce2009-06-14 11:38:52 +03002284static void io_mem_init(void)
2285{
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002286 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002287 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002288 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002289 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002290 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002291 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002292 NULL, UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03002293}
2294
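/* Listener 'begin' callback: start a fresh dispatch table, registering the
   fixed sections first so their indices stay stable. */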
Avi Kivityac1970f2012-10-03 16:22:53 +02002295static void mem_begin(MemoryListener *listener)
2296{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002297 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002298 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2299 uint16_t n;
2300
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002301 n = dummy_section(&d->map, as, &io_mem_unassigned);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002302 assert(n == PHYS_SECTION_UNASSIGNED);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002303 n = dummy_section(&d->map, as, &io_mem_notdirty);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002304 assert(n == PHYS_SECTION_NOTDIRTY);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002305 n = dummy_section(&d->map, as, &io_mem_rom);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002306 assert(n == PHYS_SECTION_ROM);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002307 n = dummy_section(&d->map, as, &io_mem_watch);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002308 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02002309
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02002310 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02002311 d->as = as;
2312 as->next_dispatch = d;
2313}
2314
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002315static void address_space_dispatch_free(AddressSpaceDispatch *d)
2316{
2317 phys_sections_free(&d->map);
2318 g_free(d);
2319}
2320
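/* Listener 'commit' callback: compact the new page map, publish it under
   RCU and free the old dispatch table after a grace period. */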
Paolo Bonzini00752702013-05-29 12:13:54 +02002321static void mem_commit(MemoryListener *listener)
2322{
2323 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02002324 AddressSpaceDispatch *cur = as->dispatch;
2325 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02002326
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002327 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02002328
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002329 atomic_rcu_set(&as->dispatch, next);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002330 if (cur) {
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002331 call_rcu(cur, address_space_dispatch_free, rcu);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002332 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02002333}
2334
Avi Kivity1d711482012-10-02 18:54:45 +02002335static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02002336{
Peter Maydell32857f42015-10-01 15:29:50 +01002337 CPUAddressSpace *cpuas;
2338 AddressSpaceDispatch *d;
Avi Kivity117712c2012-02-12 21:23:17 +02002339
2340 /* since each CPU stores ram addresses in its TLB cache, we must
2341 reset the modified entries */
Peter Maydell32857f42015-10-01 15:29:50 +01002342 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
2343 cpu_reloading_memory_map();
2344 /* The CPU and TLB are protected by the iothread lock.
2345 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2346 * may have split the RCU critical section.
2347 */
2348 d = atomic_rcu_read(&cpuas->as->dispatch);
2349 cpuas->memory_dispatch = d;
2350 tlb_flush(cpuas->cpu, 1);
Avi Kivity50c1e142012-02-08 21:36:02 +02002351}
2352
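/* Register the listener that rebuilds this address space's dispatch table
   whenever the memory topology changes. */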
Avi Kivityac1970f2012-10-03 16:22:53 +02002353void address_space_init_dispatch(AddressSpace *as)
2354{
Paolo Bonzini00752702013-05-29 12:13:54 +02002355 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002356 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002357 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02002358 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02002359 .region_add = mem_add,
2360 .region_nop = mem_add,
2361 .priority = 0,
2362 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002363 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02002364}
2365
Paolo Bonzini6e48e8f2015-02-10 10:25:44 -07002366void address_space_unregister(AddressSpace *as)
2367{
2368 memory_listener_unregister(&as->dispatch_listener);
2369}
2370
Avi Kivity83f3c252012-10-07 12:59:55 +02002371void address_space_destroy_dispatch(AddressSpace *as)
2372{
2373 AddressSpaceDispatch *d = as->dispatch;
2374
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002375 atomic_rcu_set(&as->dispatch, NULL);
2376 if (d) {
2377 call_rcu(d, address_space_dispatch_free, rcu);
2378 }
Avi Kivity83f3c252012-10-07 12:59:55 +02002379}
2380
Avi Kivity62152b82011-07-26 14:26:14 +03002381static void memory_map_init(void)
2382{
Anthony Liguori7267c092011-08-20 22:09:37 -05002383 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01002384
Paolo Bonzini57271d62013-11-07 17:14:37 +01002385 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002386 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03002387
Anthony Liguori7267c092011-08-20 22:09:37 -05002388 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02002389 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2390 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002391 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity62152b82011-07-26 14:26:14 +03002392}
2393
2394MemoryRegion *get_system_memory(void)
2395{
2396 return system_memory;
2397}
2398
Avi Kivity309cb472011-08-08 16:09:03 +03002399MemoryRegion *get_system_io(void)
2400{
2401 return system_io;
2402}
2403
pbrooke2eef172008-06-08 01:09:01 +00002404#endif /* !defined(CONFIG_USER_ONLY) */
2405
bellard13eb76e2004-01-24 15:23:36 +00002406/* physical memory access (slow version, mainly for debug) */
2407#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02002408int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00002409 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002410{
2411 int l, flags;
2412 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00002413 void * p;
bellard13eb76e2004-01-24 15:23:36 +00002414
2415 while (len > 0) {
2416 page = addr & TARGET_PAGE_MASK;
2417 l = (page + TARGET_PAGE_SIZE) - addr;
2418 if (l > len)
2419 l = len;
2420 flags = page_get_flags(page);
2421 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00002422 return -1;
bellard13eb76e2004-01-24 15:23:36 +00002423 if (is_write) {
2424 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00002425 return -1;
bellard579a97f2007-11-11 14:26:47 +00002426 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002427 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00002428 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002429 memcpy(p, buf, l);
2430 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00002431 } else {
2432 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00002433 return -1;
bellard579a97f2007-11-11 14:26:47 +00002434 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002435 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00002436 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002437 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00002438 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00002439 }
2440 len -= l;
2441 buf += l;
2442 addr += l;
2443 }
Paul Brooka68fe892010-03-01 00:08:59 +00002444 return 0;
bellard13eb76e2004-01-24 15:23:36 +00002445}
bellard8df1cd02005-01-28 22:37:22 +00002446
bellard13eb76e2004-01-24 15:23:36 +00002447#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002448
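/* After a write to guest RAM: invalidate translated code covering the range
   and update whichever dirty bitmaps still need it. */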
Paolo Bonzini845b6212015-03-23 11:45:53 +01002449static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002450 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002451{
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002452 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002453 addr += memory_region_get_ram_addr(mr);
2454
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002455 /* No early return if dirty_log_mask is or becomes 0, because
2456 * cpu_physical_memory_set_dirty_range will still call
2457 * xen_modified_memory.
2458 */
2459 if (dirty_log_mask) {
2460 dirty_log_mask =
2461 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002462 }
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002463 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2464 tb_invalidate_phys_range(addr, addr + length);
2465 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2466 }
2467 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002468}
2469
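/* Clamp an access of size @l at @addr to what @mr supports: its maximum
   access size and, for aligned-only regions, the alignment of the address,
   rounded down to a power of two. */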
Richard Henderson23326162013-07-08 14:55:59 -07002470static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02002471{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02002472 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07002473
2474 /* Regions are assumed to support 1-4 byte accesses unless
2475 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07002476 if (access_size_max == 0) {
2477 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002478 }
Richard Henderson23326162013-07-08 14:55:59 -07002479
2480 /* Bound the maximum access by the alignment of the address. */
2481 if (!mr->ops->impl.unaligned) {
2482 unsigned align_size_max = addr & -addr;
2483 if (align_size_max != 0 && align_size_max < access_size_max) {
2484 access_size_max = align_size_max;
2485 }
2486 }
2487
2488 /* Don't attempt accesses larger than the maximum. */
2489 if (l > access_size_max) {
2490 l = access_size_max;
2491 }
Peter Maydell6554f5c2015-07-24 13:33:10 +01002492 l = pow2floor(l);
Richard Henderson23326162013-07-08 14:55:59 -07002493
2494 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002495}
2496
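/* Take the iothread lock if this MMIO region requires it and flush any
   coalesced MMIO; returns true if the caller must drop the lock again. */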
Jan Kiszka4840f102015-06-18 18:47:22 +02002497static bool prepare_mmio_access(MemoryRegion *mr)
Paolo Bonzini125b3802015-06-18 18:47:21 +02002498{
Jan Kiszka4840f102015-06-18 18:47:22 +02002499 bool unlocked = !qemu_mutex_iothread_locked();
2500 bool release_lock = false;
2501
2502 if (unlocked && mr->global_locking) {
2503 qemu_mutex_lock_iothread();
2504 unlocked = false;
2505 release_lock = true;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002506 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002507 if (mr->flush_coalesced_mmio) {
2508 if (unlocked) {
2509 qemu_mutex_lock_iothread();
2510 }
2511 qemu_flush_coalesced_mmio_buffer();
2512 if (unlocked) {
2513 qemu_mutex_unlock_iothread();
2514 }
2515 }
2516
2517 return release_lock;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002518}
2519
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002520/* Called within RCU critical section. */
2521static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
2522 MemTxAttrs attrs,
2523 const uint8_t *buf,
2524 int len, hwaddr addr1,
2525 hwaddr l, MemoryRegion *mr)
bellard13eb76e2004-01-24 15:23:36 +00002526{
bellard13eb76e2004-01-24 15:23:36 +00002527 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002528 uint64_t val;
Peter Maydell3b643492015-04-26 16:49:23 +01002529 MemTxResult result = MEMTX_OK;
Jan Kiszka4840f102015-06-18 18:47:22 +02002530 bool release_lock = false;
ths3b46e622007-09-17 08:09:54 +00002531
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002532 for (;;) {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002533 if (!memory_access_is_direct(mr, true)) {
2534 release_lock |= prepare_mmio_access(mr);
2535 l = memory_access_size(mr, l, addr1);
2536 /* XXX: could force current_cpu to NULL to avoid
2537 potential bugs */
2538 switch (l) {
2539 case 8:
2540 /* 64 bit write access */
2541 val = ldq_p(buf);
2542 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2543 attrs);
2544 break;
2545 case 4:
2546 /* 32 bit write access */
2547 val = ldl_p(buf);
2548 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2549 attrs);
2550 break;
2551 case 2:
2552 /* 16 bit write access */
2553 val = lduw_p(buf);
2554 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2555 attrs);
2556 break;
2557 case 1:
2558 /* 8 bit write access */
2559 val = ldub_p(buf);
2560 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2561 attrs);
2562 break;
2563 default:
2564 abort();
bellard13eb76e2004-01-24 15:23:36 +00002565 }
2566 } else {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002567 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002568 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002569 memcpy(ptr, buf, l);
2570 invalidate_and_set_dirty(mr, addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002571 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002572
2573 if (release_lock) {
2574 qemu_mutex_unlock_iothread();
2575 release_lock = false;
2576 }
2577
bellard13eb76e2004-01-24 15:23:36 +00002578 len -= l;
2579 buf += l;
2580 addr += l;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002581
2582 if (!len) {
2583 break;
2584 }
2585
2586 l = len;
2587 mr = address_space_translate(as, addr, &addr1, &l, true);
bellard13eb76e2004-01-24 15:23:36 +00002588 }
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002589
Peter Maydell3b643492015-04-26 16:49:23 +01002590 return result;
bellard13eb76e2004-01-24 15:23:36 +00002591}
bellard8df1cd02005-01-28 22:37:22 +00002592
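/* Write @len bytes from @buf to @addr in @as, translating the range and
   splitting it across memory regions as needed. */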
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002593MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2594 const uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002595{
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002596 hwaddr l;
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002597 hwaddr addr1;
2598 MemoryRegion *mr;
2599 MemTxResult result = MEMTX_OK;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002600
2601 if (len > 0) {
2602 rcu_read_lock();
2603 l = len;
2604 mr = address_space_translate(as, addr, &addr1, &l, true);
2605 result = address_space_write_continue(as, addr, attrs, buf, len,
2606 addr1, l, mr);
2607 rcu_read_unlock();
2608 }
2609
2610 return result;
2611}
2612
2613/* Called within RCU critical section. */
2614MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
2615 MemTxAttrs attrs, uint8_t *buf,
2616 int len, hwaddr addr1, hwaddr l,
2617 MemoryRegion *mr)
2618{
2619 uint8_t *ptr;
2620 uint64_t val;
2621 MemTxResult result = MEMTX_OK;
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002622 bool release_lock = false;
2623
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002624 for (;;) {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002625 if (!memory_access_is_direct(mr, false)) {
2626 /* I/O case */
2627 release_lock |= prepare_mmio_access(mr);
2628 l = memory_access_size(mr, l, addr1);
2629 switch (l) {
2630 case 8:
2631 /* 64 bit read access */
2632 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2633 attrs);
2634 stq_p(buf, val);
2635 break;
2636 case 4:
2637 /* 32 bit read access */
2638 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2639 attrs);
2640 stl_p(buf, val);
2641 break;
2642 case 2:
2643 /* 16 bit read access */
2644 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2645 attrs);
2646 stw_p(buf, val);
2647 break;
2648 case 1:
2649 /* 8 bit read access */
2650 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2651 attrs);
2652 stb_p(buf, val);
2653 break;
2654 default:
2655 abort();
2656 }
2657 } else {
2658 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002659 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002660 memcpy(buf, ptr, l);
2661 }
2662
2663 if (release_lock) {
2664 qemu_mutex_unlock_iothread();
2665 release_lock = false;
2666 }
2667
2668 len -= l;
2669 buf += l;
2670 addr += l;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002671
2672 if (!len) {
2673 break;
2674 }
2675
2676 l = len;
2677 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002678 }
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002679
2680 return result;
2681}
2682
Paolo Bonzini3cc8f882015-12-09 10:34:13 +01002683MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
2684 MemTxAttrs attrs, uint8_t *buf, int len)
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002685{
2686 hwaddr l;
2687 hwaddr addr1;
2688 MemoryRegion *mr;
2689 MemTxResult result = MEMTX_OK;
2690
2691 if (len > 0) {
2692 rcu_read_lock();
2693 l = len;
2694 mr = address_space_translate(as, addr, &addr1, &l, false);
2695 result = address_space_read_continue(as, addr, attrs, buf, len,
2696 addr1, l, mr);
2697 rcu_read_unlock();
2698 }
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002699
2700 return result;
Avi Kivityac1970f2012-10-03 16:22:53 +02002701}
2702
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002703MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2704 uint8_t *buf, int len, bool is_write)
2705{
2706 if (is_write) {
2707 return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
2708 } else {
2709 return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
2710 }
2711}
Avi Kivityac1970f2012-10-03 16:22:53 +02002712
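/* Convenience wrapper: access system memory with unspecified transaction
   attributes. */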
Avi Kivitya8170e52012-10-23 12:30:10 +02002713void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002714 int len, int is_write)
2715{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002716 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2717 buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002718}
2719
Alexander Graf582b55a2013-12-11 14:17:44 +01002720enum write_rom_type {
2721 WRITE_DATA,
2722 FLUSH_CACHE,
2723};
2724
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002725static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
Alexander Graf582b55a2013-12-11 14:17:44 +01002726 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
bellardd0ecd2a2006-04-23 17:14:48 +00002727{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002728 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002729 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002730 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002731 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002732
Paolo Bonzini41063e12015-03-18 14:21:43 +01002733 rcu_read_lock();
bellardd0ecd2a2006-04-23 17:14:48 +00002734 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002735 l = len;
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002736 mr = address_space_translate(as, addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002737
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002738 if (!(memory_region_is_ram(mr) ||
2739 memory_region_is_romd(mr))) {
Paolo Bonzinib242e0e2015-07-04 00:24:51 +02002740 l = memory_access_size(mr, l, addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002741 } else {
bellardd0ecd2a2006-04-23 17:14:48 +00002742 /* ROM/RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002743 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf582b55a2013-12-11 14:17:44 +01002744 switch (type) {
2745 case WRITE_DATA:
2746 memcpy(ptr, buf, l);
Paolo Bonzini845b6212015-03-23 11:45:53 +01002747 invalidate_and_set_dirty(mr, addr1, l);
Alexander Graf582b55a2013-12-11 14:17:44 +01002748 break;
2749 case FLUSH_CACHE:
2750 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2751 break;
2752 }
bellardd0ecd2a2006-04-23 17:14:48 +00002753 }
2754 len -= l;
2755 buf += l;
2756 addr += l;
2757 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002758 rcu_read_unlock();
bellardd0ecd2a2006-04-23 17:14:48 +00002759}
2760
Alexander Graf582b55a2013-12-11 14:17:44 +01002761/* Used for ROM loading: can write to both RAM and ROM. */
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002762void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
Alexander Graf582b55a2013-12-11 14:17:44 +01002763 const uint8_t *buf, int len)
2764{
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002765 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
Alexander Graf582b55a2013-12-11 14:17:44 +01002766}
2767
2768void cpu_flush_icache_range(hwaddr start, int len)
2769{
2770 /*
2771 * This function should do the same thing as an icache flush that was
2772 * triggered from within the guest. For TCG we are always cache coherent,
2773 * so there is no need to flush anything. For KVM / Xen we need to flush
2774 * the host's instruction cache at least.
2775 */
2776 if (tcg_enabled()) {
2777 return;
2778 }
2779
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002780 cpu_physical_memory_write_rom_internal(&address_space_memory,
2781 start, NULL, len, FLUSH_CACHE);
Alexander Graf582b55a2013-12-11 14:17:44 +01002782}
2783
aliguori6d16c2f2009-01-22 16:59:11 +00002784typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002785 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002786 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002787 hwaddr addr;
2788 hwaddr len;
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002789 bool in_use;
aliguori6d16c2f2009-01-22 16:59:11 +00002790} BounceBuffer;
2791
2792static BounceBuffer bounce;
2793
aliguoriba223c22009-01-22 16:59:16 +00002794typedef struct MapClient {
Fam Zhenge95205e2015-03-16 17:03:37 +08002795 QEMUBH *bh;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002796 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002797} MapClient;
2798
Fam Zheng38e047b2015-03-16 17:03:35 +08002799QemuMutex map_client_list_lock;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002800static QLIST_HEAD(map_client_list, MapClient) map_client_list
2801 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002802
Fam Zhenge95205e2015-03-16 17:03:37 +08002803static void cpu_unregister_map_client_do(MapClient *client)
aliguoriba223c22009-01-22 16:59:16 +00002804{
Blue Swirl72cf2d42009-09-12 07:36:22 +00002805 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002806 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002807}
2808
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002809static void cpu_notify_map_clients_locked(void)
aliguoriba223c22009-01-22 16:59:16 +00002810{
2811 MapClient *client;
2812
Blue Swirl72cf2d42009-09-12 07:36:22 +00002813 while (!QLIST_EMPTY(&map_client_list)) {
2814 client = QLIST_FIRST(&map_client_list);
Fam Zhenge95205e2015-03-16 17:03:37 +08002815 qemu_bh_schedule(client->bh);
2816 cpu_unregister_map_client_do(client);
aliguoriba223c22009-01-22 16:59:16 +00002817 }
2818}
2819
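/* Ask for @bh to be scheduled once mapping resources (the bounce buffer)
   are free again; it fires immediately if nothing is in flight. */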
Fam Zhenge95205e2015-03-16 17:03:37 +08002820void cpu_register_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002821{
2822 MapClient *client = g_malloc(sizeof(*client));
2823
Fam Zheng38e047b2015-03-16 17:03:35 +08002824 qemu_mutex_lock(&map_client_list_lock);
Fam Zhenge95205e2015-03-16 17:03:37 +08002825 client->bh = bh;
bellardd0ecd2a2006-04-23 17:14:48 +00002826 QLIST_INSERT_HEAD(&map_client_list, client, link);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002827 if (!atomic_read(&bounce.in_use)) {
2828 cpu_notify_map_clients_locked();
2829 }
Fam Zheng38e047b2015-03-16 17:03:35 +08002830 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002831}
2832
Fam Zheng38e047b2015-03-16 17:03:35 +08002833void cpu_exec_init_all(void)
2834{
2835 qemu_mutex_init(&ram_list.mutex);
Fam Zheng38e047b2015-03-16 17:03:35 +08002836 io_mem_init();
Paolo Bonzini680a4782015-11-02 09:23:52 +01002837 memory_map_init();
Fam Zheng38e047b2015-03-16 17:03:35 +08002838 qemu_mutex_init(&map_client_list_lock);
2839}
2840
Fam Zhenge95205e2015-03-16 17:03:37 +08002841void cpu_unregister_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002842{
Fam Zhenge95205e2015-03-16 17:03:37 +08002843 MapClient *client;
bellardd0ecd2a2006-04-23 17:14:48 +00002844
Fam Zhenge95205e2015-03-16 17:03:37 +08002845 qemu_mutex_lock(&map_client_list_lock);
2846 QLIST_FOREACH(client, &map_client_list, link) {
2847 if (client->bh == bh) {
2848 cpu_unregister_map_client_do(client);
2849 break;
2850 }
2851 }
2852 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002853}
2854
2855static void cpu_notify_map_clients(void)
2856{
Fam Zheng38e047b2015-03-16 17:03:35 +08002857 qemu_mutex_lock(&map_client_list_lock);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002858 cpu_notify_map_clients_locked();
Fam Zheng38e047b2015-03-16 17:03:35 +08002859 qemu_mutex_unlock(&map_client_list_lock);
aliguori6d16c2f2009-01-22 16:59:11 +00002860}
2861
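/* Check whether every region touched by a @len byte access at @addr would
   accept it, without actually performing the access. */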
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002862bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2863{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002864 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002865 hwaddr l, xlat;
2866
Paolo Bonzini41063e12015-03-18 14:21:43 +01002867 rcu_read_lock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002868 while (len > 0) {
2869 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002870 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2871 if (!memory_access_is_direct(mr, is_write)) {
2872 l = memory_access_size(mr, l, addr);
2873 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002874 return false;
2875 }
2876 }
2877
2878 len -= l;
2879 addr += l;
2880 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002881 rcu_read_unlock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002882 return true;
2883}
2884
aliguori6d16c2f2009-01-22 16:59:11 +00002885/* Map a physical memory region into a host virtual address.
2886 * May map a subset of the requested range, given by and returned in *plen.
2887 * May return NULL if resources needed to perform the mapping are exhausted.
2888 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002889 * Use cpu_register_map_client() to know when retrying the map operation is
2890 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002891 */
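/* A minimal usage sketch (illustrative only; 'as', 'gpa', 'size' and 'data'
 * are placeholders supplied by the caller):
 *
 *     hwaddr plen = size;
 *     void *p = address_space_map(as, gpa, &plen, true);
 *     if (p) {
 *         memcpy(p, data, plen);
 *         address_space_unmap(as, p, plen, true, plen);
 *     }
 */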
Avi Kivityac1970f2012-10-03 16:22:53 +02002892void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002893 hwaddr addr,
2894 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002895 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002896{
Avi Kivitya8170e52012-10-23 12:30:10 +02002897 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002898 hwaddr done = 0;
2899 hwaddr l, xlat, base;
2900 MemoryRegion *mr, *this_mr;
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01002901 void *ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00002902
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002903 if (len == 0) {
2904 return NULL;
2905 }
aliguori6d16c2f2009-01-22 16:59:11 +00002906
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002907 l = len;
Paolo Bonzini41063e12015-03-18 14:21:43 +01002908 rcu_read_lock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002909 mr = address_space_translate(as, addr, &xlat, &l, is_write);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002910
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002911 if (!memory_access_is_direct(mr, is_write)) {
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002912 if (atomic_xchg(&bounce.in_use, true)) {
Paolo Bonzini41063e12015-03-18 14:21:43 +01002913 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002914 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002915 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002916 /* Avoid unbounded allocations */
2917 l = MIN(l, TARGET_PAGE_SIZE);
2918 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002919 bounce.addr = addr;
2920 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002921
2922 memory_region_ref(mr);
2923 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002924 if (!is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002925 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2926 bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002927 }
aliguori6d16c2f2009-01-22 16:59:11 +00002928
Paolo Bonzini41063e12015-03-18 14:21:43 +01002929 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002930 *plen = l;
2931 return bounce.buffer;
2932 }
2933
2934 base = xlat;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002935
2936 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00002937 len -= l;
2938 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002939 done += l;
2940 if (len == 0) {
2941 break;
2942 }
2943
2944 l = len;
2945 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2946 if (this_mr != mr || xlat != base + done) {
2947 break;
2948 }
aliguori6d16c2f2009-01-22 16:59:11 +00002949 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002950
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002951 memory_region_ref(mr);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002952 *plen = done;
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002953 ptr = qemu_ram_ptr_length(mr->ram_block, base, plen);
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01002954 rcu_read_unlock();
2955
2956 return ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00002957}
2958
Avi Kivityac1970f2012-10-03 16:22:53 +02002959/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002960 * Will also mark the memory as dirty if is_write == 1. access_len gives
2961 * the amount of memory that was actually read or written by the caller.
2962 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002963void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2964 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002965{
2966 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002967 MemoryRegion *mr;
2968 ram_addr_t addr1;
2969
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01002970 mr = memory_region_from_host(buffer, &addr1);
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002971 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00002972 if (is_write) {
Paolo Bonzini845b6212015-03-23 11:45:53 +01002973 invalidate_and_set_dirty(mr, addr1, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002974 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002975 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002976 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002977 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002978 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00002979 return;
2980 }
2981 if (is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002982 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
2983 bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002984 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002985 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002986 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002987 memory_region_unref(bounce.mr);
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002988 atomic_mb_set(&bounce.in_use, false);
aliguoriba223c22009-01-22 16:59:16 +00002989 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002990}
bellardd0ecd2a2006-04-23 17:14:48 +00002991
Avi Kivitya8170e52012-10-23 12:30:10 +02002992void *cpu_physical_memory_map(hwaddr addr,
2993 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002994 int is_write)
2995{
2996 return address_space_map(&address_space_memory, addr, plen, is_write);
2997}
2998
Avi Kivitya8170e52012-10-23 12:30:10 +02002999void cpu_physical_memory_unmap(void *buffer, hwaddr len,
3000 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02003001{
3002 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
3003}
3004
bellard8df1cd02005-01-28 22:37:22 +00003005/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003006static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
3007 MemTxAttrs attrs,
3008 MemTxResult *result,
3009 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003010{
bellard8df1cd02005-01-28 22:37:22 +00003011 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02003012 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003013 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003014 hwaddr l = 4;
3015 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003016 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003017 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003018
Paolo Bonzini41063e12015-03-18 14:21:43 +01003019 rcu_read_lock();
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003020 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003021 if (l < 4 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003022 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003023
bellard8df1cd02005-01-28 22:37:22 +00003024 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003025 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003026#if defined(TARGET_WORDS_BIGENDIAN)
3027 if (endian == DEVICE_LITTLE_ENDIAN) {
3028 val = bswap32(val);
3029 }
3030#else
3031 if (endian == DEVICE_BIG_ENDIAN) {
3032 val = bswap32(val);
3033 }
3034#endif
bellard8df1cd02005-01-28 22:37:22 +00003035 } else {
3036 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003037 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003038 switch (endian) {
3039 case DEVICE_LITTLE_ENDIAN:
3040 val = ldl_le_p(ptr);
3041 break;
3042 case DEVICE_BIG_ENDIAN:
3043 val = ldl_be_p(ptr);
3044 break;
3045 default:
3046 val = ldl_p(ptr);
3047 break;
3048 }
Peter Maydell50013112015-04-26 16:49:24 +01003049 r = MEMTX_OK;
3050 }
3051 if (result) {
3052 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003053 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003054 if (release_lock) {
3055 qemu_mutex_unlock_iothread();
3056 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003057 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003058 return val;
3059}
3060
Peter Maydell50013112015-04-26 16:49:24 +01003061uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
3062 MemTxAttrs attrs, MemTxResult *result)
3063{
3064 return address_space_ldl_internal(as, addr, attrs, result,
3065 DEVICE_NATIVE_ENDIAN);
3066}
3067
3068uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
3069 MemTxAttrs attrs, MemTxResult *result)
3070{
3071 return address_space_ldl_internal(as, addr, attrs, result,
3072 DEVICE_LITTLE_ENDIAN);
3073}
3074
3075uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
3076 MemTxAttrs attrs, MemTxResult *result)
3077{
3078 return address_space_ldl_internal(as, addr, attrs, result,
3079 DEVICE_BIG_ENDIAN);
3080}
3081
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003082uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003083{
Peter Maydell50013112015-04-26 16:49:24 +01003084 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003085}
3086
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003087uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003088{
Peter Maydell50013112015-04-26 16:49:24 +01003089 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003090}
3091
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003092uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003093{
Peter Maydell50013112015-04-26 16:49:24 +01003094 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003095}
3096
bellard84b7b8e2005-11-28 21:19:04 +00003097/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003098static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
3099 MemTxAttrs attrs,
3100 MemTxResult *result,
3101 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00003102{
bellard84b7b8e2005-11-28 21:19:04 +00003103 uint8_t *ptr;
3104 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003105 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003106 hwaddr l = 8;
3107 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003108 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003109 bool release_lock = false;
bellard84b7b8e2005-11-28 21:19:04 +00003110
Paolo Bonzini41063e12015-03-18 14:21:43 +01003111 rcu_read_lock();
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003112 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003113 false);
3114 if (l < 8 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003115 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003116
bellard84b7b8e2005-11-28 21:19:04 +00003117 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003118 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
Paolo Bonzini968a5622013-05-24 17:58:37 +02003119#if defined(TARGET_WORDS_BIGENDIAN)
3120 if (endian == DEVICE_LITTLE_ENDIAN) {
3121 val = bswap64(val);
3122 }
3123#else
3124 if (endian == DEVICE_BIG_ENDIAN) {
3125 val = bswap64(val);
3126 }
3127#endif
bellard84b7b8e2005-11-28 21:19:04 +00003128 } else {
3129 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003130 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003131 switch (endian) {
3132 case DEVICE_LITTLE_ENDIAN:
3133 val = ldq_le_p(ptr);
3134 break;
3135 case DEVICE_BIG_ENDIAN:
3136 val = ldq_be_p(ptr);
3137 break;
3138 default:
3139 val = ldq_p(ptr);
3140 break;
3141 }
Peter Maydell50013112015-04-26 16:49:24 +01003142 r = MEMTX_OK;
3143 }
3144 if (result) {
3145 *result = r;
bellard84b7b8e2005-11-28 21:19:04 +00003146 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003147 if (release_lock) {
3148 qemu_mutex_unlock_iothread();
3149 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003150 rcu_read_unlock();
bellard84b7b8e2005-11-28 21:19:04 +00003151 return val;
3152}
3153
Peter Maydell50013112015-04-26 16:49:24 +01003154uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
3155 MemTxAttrs attrs, MemTxResult *result)
3156{
3157 return address_space_ldq_internal(as, addr, attrs, result,
3158 DEVICE_NATIVE_ENDIAN);
3159}
3160
3161uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
3162 MemTxAttrs attrs, MemTxResult *result)
3163{
3164 return address_space_ldq_internal(as, addr, attrs, result,
3165 DEVICE_LITTLE_ENDIAN);
3166}
3167
3168uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
3169 MemTxAttrs attrs, MemTxResult *result)
3170{
3171 return address_space_ldq_internal(as, addr, attrs, result,
3172 DEVICE_BIG_ENDIAN);
3173}
3174
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003175uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003176{
Peter Maydell50013112015-04-26 16:49:24 +01003177 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003178}
3179
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003180uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003181{
Peter Maydell50013112015-04-26 16:49:24 +01003182 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003183}
3184
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003185uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003186{
Peter Maydell50013112015-04-26 16:49:24 +01003187 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003188}
3189
bellardaab33092005-10-30 20:48:42 +00003190/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003191uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
3192 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003193{
3194 uint8_t val;
Peter Maydell50013112015-04-26 16:49:24 +01003195 MemTxResult r;
3196
3197 r = address_space_rw(as, addr, attrs, &val, 1, 0);
3198 if (result) {
3199 *result = r;
3200 }
bellardaab33092005-10-30 20:48:42 +00003201 return val;
3202}
3203
Peter Maydell50013112015-04-26 16:49:24 +01003204uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
3205{
3206 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3207}
3208
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003209/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003210static inline uint32_t address_space_lduw_internal(AddressSpace *as,
3211 hwaddr addr,
3212 MemTxAttrs attrs,
3213 MemTxResult *result,
3214 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003215{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003216 uint8_t *ptr;
3217 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003218 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003219 hwaddr l = 2;
3220 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003221 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003222 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003223
Paolo Bonzini41063e12015-03-18 14:21:43 +01003224 rcu_read_lock();
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003225 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003226 false);
3227 if (l < 2 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003228 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003229
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003230 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003231 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003232#if defined(TARGET_WORDS_BIGENDIAN)
3233 if (endian == DEVICE_LITTLE_ENDIAN) {
3234 val = bswap16(val);
3235 }
3236#else
3237 if (endian == DEVICE_BIG_ENDIAN) {
3238 val = bswap16(val);
3239 }
3240#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003241 } else {
3242 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003243 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003244 switch (endian) {
3245 case DEVICE_LITTLE_ENDIAN:
3246 val = lduw_le_p(ptr);
3247 break;
3248 case DEVICE_BIG_ENDIAN:
3249 val = lduw_be_p(ptr);
3250 break;
3251 default:
3252 val = lduw_p(ptr);
3253 break;
3254 }
Peter Maydell50013112015-04-26 16:49:24 +01003255 r = MEMTX_OK;
3256 }
3257 if (result) {
3258 *result = r;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003259 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003260 if (release_lock) {
3261 qemu_mutex_unlock_iothread();
3262 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003263 rcu_read_unlock();
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003264 return val;
bellardaab33092005-10-30 20:48:42 +00003265}
3266
Peter Maydell50013112015-04-26 16:49:24 +01003267uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
3268 MemTxAttrs attrs, MemTxResult *result)
3269{
3270 return address_space_lduw_internal(as, addr, attrs, result,
3271 DEVICE_NATIVE_ENDIAN);
3272}
3273
3274uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
3275 MemTxAttrs attrs, MemTxResult *result)
3276{
3277 return address_space_lduw_internal(as, addr, attrs, result,
3278 DEVICE_LITTLE_ENDIAN);
3279}
3280
3281uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
3282 MemTxAttrs attrs, MemTxResult *result)
3283{
3284 return address_space_lduw_internal(as, addr, attrs, result,
3285 DEVICE_BIG_ENDIAN);
3286}
3287
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003288uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003289{
Peter Maydell50013112015-04-26 16:49:24 +01003290 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003291}
3292
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003293uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003294{
Peter Maydell50013112015-04-26 16:49:24 +01003295 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003296}
3297
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003298uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003299{
Peter Maydell50013112015-04-26 16:49:24 +01003300 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003301}
3302
bellard8df1cd02005-01-28 22:37:22 +00003303/* warning: addr must be aligned. The RAM page is not marked as dirty
 3304   and the code inside is not invalidated. This is useful if the dirty
 3305   bits are used to track modified PTEs. */
Peter Maydell50013112015-04-26 16:49:24 +01003306void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3307 MemTxAttrs attrs, MemTxResult *result)
bellard8df1cd02005-01-28 22:37:22 +00003308{
bellard8df1cd02005-01-28 22:37:22 +00003309 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003310 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003311 hwaddr l = 4;
3312 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003313 MemTxResult r;
Paolo Bonzini845b6212015-03-23 11:45:53 +01003314 uint8_t dirty_log_mask;
Jan Kiszka4840f102015-06-18 18:47:22 +02003315 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003316
Paolo Bonzini41063e12015-03-18 14:21:43 +01003317 rcu_read_lock();
Edgar E. Iglesias2198a122013-11-28 10:13:41 +01003318 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003319 true);
3320 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003321 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003322
Peter Maydell50013112015-04-26 16:49:24 +01003323 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003324 } else {
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003325 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
bellard8df1cd02005-01-28 22:37:22 +00003326 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003327
Paolo Bonzini845b6212015-03-23 11:45:53 +01003328 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3329 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003330 cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
3331 4, dirty_log_mask);
Peter Maydell50013112015-04-26 16:49:24 +01003332 r = MEMTX_OK;
3333 }
3334 if (result) {
3335 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003336 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003337 if (release_lock) {
3338 qemu_mutex_unlock_iothread();
3339 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003340 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003341}
3342
Peter Maydell50013112015-04-26 16:49:24 +01003343void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3344{
3345 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3346}
3347
bellard8df1cd02005-01-28 22:37:22 +00003348/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003349static inline void address_space_stl_internal(AddressSpace *as,
3350 hwaddr addr, uint32_t val,
3351 MemTxAttrs attrs,
3352 MemTxResult *result,
3353 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003354{
bellard8df1cd02005-01-28 22:37:22 +00003355 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003356 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003357 hwaddr l = 4;
3358 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003359 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003360 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003361
Paolo Bonzini41063e12015-03-18 14:21:43 +01003362 rcu_read_lock();
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003363 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003364 true);
3365 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003366 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003367
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003368#if defined(TARGET_WORDS_BIGENDIAN)
3369 if (endian == DEVICE_LITTLE_ENDIAN) {
3370 val = bswap32(val);
3371 }
3372#else
3373 if (endian == DEVICE_BIG_ENDIAN) {
3374 val = bswap32(val);
3375 }
3376#endif
Peter Maydell50013112015-04-26 16:49:24 +01003377 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003378 } else {
bellard8df1cd02005-01-28 22:37:22 +00003379 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003380 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003381 switch (endian) {
3382 case DEVICE_LITTLE_ENDIAN:
3383 stl_le_p(ptr, val);
3384 break;
3385 case DEVICE_BIG_ENDIAN:
3386 stl_be_p(ptr, val);
3387 break;
3388 default:
3389 stl_p(ptr, val);
3390 break;
3391 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003392 invalidate_and_set_dirty(mr, addr1, 4);
Peter Maydell50013112015-04-26 16:49:24 +01003393 r = MEMTX_OK;
bellard8df1cd02005-01-28 22:37:22 +00003394 }
Peter Maydell50013112015-04-26 16:49:24 +01003395 if (result) {
3396 *result = r;
3397 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003398 if (release_lock) {
3399 qemu_mutex_unlock_iothread();
3400 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003401 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003402}
3403
3404void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3405 MemTxAttrs attrs, MemTxResult *result)
3406{
3407 address_space_stl_internal(as, addr, val, attrs, result,
3408 DEVICE_NATIVE_ENDIAN);
3409}
3410
3411void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3412 MemTxAttrs attrs, MemTxResult *result)
3413{
3414 address_space_stl_internal(as, addr, val, attrs, result,
3415 DEVICE_LITTLE_ENDIAN);
3416}
3417
3418void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3419 MemTxAttrs attrs, MemTxResult *result)
3420{
3421 address_space_stl_internal(as, addr, val, attrs, result,
3422 DEVICE_BIG_ENDIAN);
bellard8df1cd02005-01-28 22:37:22 +00003423}
3424
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003425void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003426{
Peter Maydell50013112015-04-26 16:49:24 +01003427 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003428}
3429
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003430void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003431{
Peter Maydell50013112015-04-26 16:49:24 +01003432 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003433}
3434
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003435void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003436{
Peter Maydell50013112015-04-26 16:49:24 +01003437 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003438}
3439
bellardaab33092005-10-30 20:48:42 +00003440/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003441void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3442 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003443{
3444 uint8_t v = val;
Peter Maydell50013112015-04-26 16:49:24 +01003445 MemTxResult r;
3446
3447 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3448 if (result) {
3449 *result = r;
3450 }
3451}
3452
3453void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3454{
3455 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003456}
3457
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003458/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003459static inline void address_space_stw_internal(AddressSpace *as,
3460 hwaddr addr, uint32_t val,
3461 MemTxAttrs attrs,
3462 MemTxResult *result,
3463 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003464{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003465 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003466 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003467 hwaddr l = 2;
3468 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003469 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003470 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003471
Paolo Bonzini41063e12015-03-18 14:21:43 +01003472 rcu_read_lock();
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003473 mr = address_space_translate(as, addr, &addr1, &l, true);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003474 if (l < 2 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003475 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003476
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003477#if defined(TARGET_WORDS_BIGENDIAN)
3478 if (endian == DEVICE_LITTLE_ENDIAN) {
3479 val = bswap16(val);
3480 }
3481#else
3482 if (endian == DEVICE_BIG_ENDIAN) {
3483 val = bswap16(val);
3484 }
3485#endif
Peter Maydell50013112015-04-26 16:49:24 +01003486 r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003487 } else {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003488 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003489 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003490 switch (endian) {
3491 case DEVICE_LITTLE_ENDIAN:
3492 stw_le_p(ptr, val);
3493 break;
3494 case DEVICE_BIG_ENDIAN:
3495 stw_be_p(ptr, val);
3496 break;
3497 default:
3498 stw_p(ptr, val);
3499 break;
3500 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003501 invalidate_and_set_dirty(mr, addr1, 2);
Peter Maydell50013112015-04-26 16:49:24 +01003502 r = MEMTX_OK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003503 }
Peter Maydell50013112015-04-26 16:49:24 +01003504 if (result) {
3505 *result = r;
3506 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003507 if (release_lock) {
3508 qemu_mutex_unlock_iothread();
3509 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003510 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003511}
3512
3513void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
3514 MemTxAttrs attrs, MemTxResult *result)
3515{
3516 address_space_stw_internal(as, addr, val, attrs, result,
3517 DEVICE_NATIVE_ENDIAN);
3518}
3519
3520void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
3521 MemTxAttrs attrs, MemTxResult *result)
3522{
3523 address_space_stw_internal(as, addr, val, attrs, result,
3524 DEVICE_LITTLE_ENDIAN);
3525}
3526
3527void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
3528 MemTxAttrs attrs, MemTxResult *result)
3529{
3530 address_space_stw_internal(as, addr, val, attrs, result,
3531 DEVICE_BIG_ENDIAN);
bellardaab33092005-10-30 20:48:42 +00003532}
3533
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003534void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003535{
Peter Maydell50013112015-04-26 16:49:24 +01003536 address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003537}
3538
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003539void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003540{
Peter Maydell50013112015-04-26 16:49:24 +01003541 address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003542}
3543
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003544void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003545{
Peter Maydell50013112015-04-26 16:49:24 +01003546 address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003547}
3548
bellardaab33092005-10-30 20:48:42 +00003549/* XXX: optimize: the 64-bit stores below convert the value in place and then bounce it through the generic address_space_rw path */
Peter Maydell50013112015-04-26 16:49:24 +01003550void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
3551 MemTxAttrs attrs, MemTxResult *result)
3552{
3553 MemTxResult r;
3554 val = tswap64(val);
3555 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3556 if (result) {
3557 *result = r;
3558 }
3559}
3560
3561void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
3562 MemTxAttrs attrs, MemTxResult *result)
3563{
3564 MemTxResult r;
3565 val = cpu_to_le64(val);
3566 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3567 if (result) {
3568 *result = r;
3569 }
3570}

Peter Maydell50013112015-04-26 16:49:24 +01003571void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
3572 MemTxAttrs attrs, MemTxResult *result)
3573{
3574 MemTxResult r;
3575 val = cpu_to_be64(val);
3576 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3577 if (result) {
3578 *result = r;
3579 }
3580}
3581
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003582void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00003583{
Peter Maydell50013112015-04-26 16:49:24 +01003584 address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003585}
3586
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003587void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003588{
Peter Maydell50013112015-04-26 16:49:24 +01003589 address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003590}
3591
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003592void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003593{
Peter Maydell50013112015-04-26 16:49:24 +01003594 address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003595}
3596
aliguori5e2972f2009-03-28 17:51:36 +00003597/* virtual memory access for debug (includes writing to ROM) */
Andreas Färberf17ec442013-06-29 19:40:58 +02003598int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00003599 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003600{
3601 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02003602 hwaddr phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00003603 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00003604
3605 while (len > 0) {
Peter Maydell5232e4c2016-01-21 14:15:06 +00003606 int asidx;
3607 MemTxAttrs attrs;
3608
bellard13eb76e2004-01-24 15:23:36 +00003609 page = addr & TARGET_PAGE_MASK;
Peter Maydell5232e4c2016-01-21 14:15:06 +00003610 phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
3611 asidx = cpu_asidx_from_attrs(cpu, attrs);
bellard13eb76e2004-01-24 15:23:36 +00003612 /* if no physical page mapped, return an error */
3613        if (phys_addr == -1) {
3614            return -1;
        }
3615        l = (page + TARGET_PAGE_SIZE) - addr;
3616        if (l > len) {
3617            l = len;
        }
aliguori5e2972f2009-03-28 17:51:36 +00003618 phys_addr += (addr & ~TARGET_PAGE_MASK);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003619 if (is_write) {
Peter Maydell5232e4c2016-01-21 14:15:06 +00003620 cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
3621 phys_addr, buf, l);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003622 } else {
Peter Maydell5232e4c2016-01-21 14:15:06 +00003623 address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
3624 MEMTXATTRS_UNSPECIFIED,
Peter Maydell5c9eb022015-04-26 16:49:24 +01003625 buf, l, 0);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003626 }
bellard13eb76e2004-01-24 15:23:36 +00003627 len -= l;
3628 buf += l;
3629 addr += l;
3630 }
3631 return 0;
3632}
Dr. David Alan Gilbert038629a2015-11-05 18:10:29 +00003633
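/*
 * Usage sketch (illustrative): a debugger front end such as the gdbstub reads
 * guest virtual memory through this helper; cpu and vaddr here stand in for a
 * valid CPUState and a guest virtual address chosen by the caller:
 *
 *     uint8_t buf[16];
 *     if (cpu_memory_rw_debug(cpu, vaddr, buf, sizeof(buf), 0) < 0) {
 *         (the address has no physical page mapped)
 *     }
 */
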
3634/*
3635 * Allows code that needs to deal with migration bitmaps etc. to still be built
3636 * target-independent.
3637 */
3638size_t qemu_target_page_bits(void)
3639{
3640 return TARGET_PAGE_BITS;
3641}
3642
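/*
 * Usage sketch (illustrative): migration code can derive the page size from
 * the bit count; ram_bytes below is a hypothetical byte count being converted
 * into a number of dirty-bitmap pages:
 *
 *     size_t page_size = (size_t)1 << qemu_target_page_bits();
 *     uint64_t pages = DIV_ROUND_UP(ram_bytes, page_size);
 */
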
Paul Brooka68fe892010-03-01 00:08:59 +00003643#endif
bellard13eb76e2004-01-24 15:23:36 +00003644
Blue Swirl8e4a4242013-01-06 18:30:17 +00003645/*
3646 * A helper function for the _utterly broken_ virtio device model to find out if
3647 * it's running on a big endian machine. Don't do this at home kids!
3648 */
Greg Kurz98ed8ec2014-06-24 19:26:29 +02003649bool target_words_bigendian(void);
3650bool target_words_bigendian(void)
Blue Swirl8e4a4242013-01-06 18:30:17 +00003651{
3652#if defined(TARGET_WORDS_BIGENDIAN)
3653 return true;
3654#else
3655 return false;
3656#endif
3657}
3658
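/*
 * Usage sketch (illustrative): per the comment above, virtio-style callers use
 * this to pick a default endianness for legacy devices, along the lines of:
 *
 *     if (target_words_bigendian()) {
 *         (treat the legacy device's fields as big endian)
 *     }
 */
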
Wen Congyang76f35532012-05-07 12:04:18 +08003659#ifndef CONFIG_USER_ONLY
Avi Kivitya8170e52012-10-23 12:30:10 +02003660bool cpu_physical_memory_is_io(hwaddr phys_addr)
Wen Congyang76f35532012-05-07 12:04:18 +08003661{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003662    MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003663 hwaddr l = 1;
Paolo Bonzini41063e12015-03-18 14:21:43 +01003664 bool res;
Wen Congyang76f35532012-05-07 12:04:18 +08003665
Paolo Bonzini41063e12015-03-18 14:21:43 +01003666 rcu_read_lock();
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003667 mr = address_space_translate(&address_space_memory,
3668 phys_addr, &phys_addr, &l, false);
Wen Congyang76f35532012-05-07 12:04:18 +08003669
Paolo Bonzini41063e12015-03-18 14:21:43 +01003670 res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
3671 rcu_read_unlock();
3672 return res;
Wen Congyang76f35532012-05-07 12:04:18 +08003673}
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003674
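/*
 * Usage sketch (illustrative): callers such as the guest-memory dump code use
 * this to skip guest physical pages backed by device MMIO rather than RAM or
 * ROM; paddr stands in for a guest physical address being walked:
 *
 *     if (cpu_physical_memory_is_io(paddr)) {
 *         continue;    (reading it could trigger device side effects)
 *     }
 */
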
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003675int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003676{
3677 RAMBlock *block;
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003678 int ret = 0;
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003679
Mike Day0dc3f442013-09-05 14:41:35 -04003680 rcu_read_lock();
3681 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003682 ret = func(block->idstr, block->host, block->offset,
3683 block->used_length, opaque);
3684 if (ret) {
3685 break;
3686 }
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003687 }
Mike Day0dc3f442013-09-05 14:41:35 -04003688 rcu_read_unlock();
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003689 return ret;
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003690}
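
/*
 * Usage sketch (illustrative): the caller supplies a RAMBlockIterFunc and is
 * handed each block's name, host address, offset and used length in turn;
 * returning non-zero from the callback stops the walk early:
 *
 *     static int count_bytes(const char *idstr, void *host_addr,
 *                            ram_addr_t offset, ram_addr_t length,
 *                            void *opaque)
 *     {
 *         *(uint64_t *)opaque += length;
 *         return 0;
 *     }
 *
 *     uint64_t total = 0;
 *     qemu_ram_foreach_block(count_bytes, &total);
 */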
Peter Maydellec3f8c92013-06-27 20:53:38 +01003691#endif