/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#ifndef _WIN32
#endif

#include "qemu/cutils.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "hw/qdev-core.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#include "hw/xen/xen.h"
#endif
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "hw/hw.h"
#include "exec/memory.h"
#include "exec/ioport.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "exec/log.h"

#include "migration/vmstate.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];
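
/* A worked example of the sizing above (illustrative only, assuming a
 * hypothetical 12-bit TARGET_PAGE_BITS): P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1
 * = 6, i.e. the dispatch tree is at most six levels of 512-entry nodes deep,
 * and a single PhysPageEntry.skip value can jump over several of those levels
 * at once.
 */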

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    static unsigned alloc_hint = 16;
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, alloc_hint);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
        alloc_hint = map->nodes_nb_alloc;
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return section->size.hi ||
           range_covers_byte(section->offset_within_address_space,
                             section->size.lo, addr);
}

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = atomic_read(&d->mru_section);
    subpage_t *subpage;
    bool update;

    if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
        section_covers_addr(section, addr)) {
        update = false;
    } else {
        section = phys_page_find(d->phys_map, addr, d->map.nodes,
                                 d->map.sections);
        update = true;
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    if (update) {
        atomic_set(&d->mru_section, section);
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}

Jan Kiszka90260c62013-05-26 21:46:51 +0200459MemoryRegionSection *
Peter Maydelld7898cd2016-01-21 14:15:05 +0000460address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +0200461 hwaddr *xlat, hwaddr *plen)
Jan Kiszka90260c62013-05-26 21:46:51 +0200462{
Avi Kivity30951152012-10-30 13:47:46 +0200463 MemoryRegionSection *section;
Peter Maydelld7898cd2016-01-21 14:15:05 +0000464 AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;
465
466 section = address_space_translate_internal(d, addr, xlat, plen, false);
Avi Kivity30951152012-10-30 13:47:46 +0200467
468 assert(!section->mr->iommu_ops);
469 return section;
Jan Kiszka90260c62013-05-26 21:46:51 +0200470}
bellard9fa3e852004-01-04 18:06:42 +0000471#endif
bellardfd6ce8f2003-05-14 19:00:11 +0000472
Andreas Färberb170fce2013-01-20 20:23:22 +0100473#if !defined(CONFIG_USER_ONLY)
pbrook9656f322008-07-01 20:01:19 +0000474
Juan Quintelae59fb372009-09-29 22:48:21 +0200475static int cpu_common_post_load(void *opaque, int version_id)
Juan Quintelae7f4eff2009-09-10 03:04:33 +0200476{
Andreas Färber259186a2013-01-17 18:51:17 +0100477 CPUState *cpu = opaque;
Juan Quintelae7f4eff2009-09-10 03:04:33 +0200478
aurel323098dba2009-03-07 21:28:24 +0000479 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
480 version_id is increased. */
Andreas Färber259186a2013-01-17 18:51:17 +0100481 cpu->interrupt_request &= ~0x01;
Christian Borntraegerc01a71c2014-03-17 17:13:12 +0100482 tlb_flush(cpu, 1);
pbrook9656f322008-07-01 20:01:19 +0000483
484 return 0;
485}
Juan Quintelae7f4eff2009-09-10 03:04:33 +0200486
Pavel Dovgaluk6c3bff02014-07-31 09:41:17 +0400487static int cpu_common_pre_load(void *opaque)
488{
489 CPUState *cpu = opaque;
490
Paolo Bonziniadee6422014-12-19 12:53:14 +0100491 cpu->exception_index = -1;
Pavel Dovgaluk6c3bff02014-07-31 09:41:17 +0400492
493 return 0;
494}
495
496static bool cpu_common_exception_index_needed(void *opaque)
497{
498 CPUState *cpu = opaque;
499
Paolo Bonziniadee6422014-12-19 12:53:14 +0100500 return tcg_enabled() && cpu->exception_index != -1;
Pavel Dovgaluk6c3bff02014-07-31 09:41:17 +0400501}
502
503static const VMStateDescription vmstate_cpu_common_exception_index = {
504 .name = "cpu_common/exception_index",
505 .version_id = 1,
506 .minimum_version_id = 1,
Juan Quintela5cd8cad2014-09-23 14:09:54 +0200507 .needed = cpu_common_exception_index_needed,
Pavel Dovgaluk6c3bff02014-07-31 09:41:17 +0400508 .fields = (VMStateField[]) {
509 VMSTATE_INT32(exception_index, CPUState),
510 VMSTATE_END_OF_LIST()
511 }
512};
513
Andrey Smetaninbac05aa2015-07-03 15:01:44 +0300514static bool cpu_common_crash_occurred_needed(void *opaque)
515{
516 CPUState *cpu = opaque;
517
518 return cpu->crash_occurred;
519}
520
521static const VMStateDescription vmstate_cpu_common_crash_occurred = {
522 .name = "cpu_common/crash_occurred",
523 .version_id = 1,
524 .minimum_version_id = 1,
525 .needed = cpu_common_crash_occurred_needed,
526 .fields = (VMStateField[]) {
527 VMSTATE_BOOL(crash_occurred, CPUState),
528 VMSTATE_END_OF_LIST()
529 }
530};
531
Andreas Färber1a1562f2013-06-17 04:09:11 +0200532const VMStateDescription vmstate_cpu_common = {
Juan Quintelae7f4eff2009-09-10 03:04:33 +0200533 .name = "cpu_common",
534 .version_id = 1,
535 .minimum_version_id = 1,
Pavel Dovgaluk6c3bff02014-07-31 09:41:17 +0400536 .pre_load = cpu_common_pre_load,
Juan Quintelae7f4eff2009-09-10 03:04:33 +0200537 .post_load = cpu_common_post_load,
Juan Quintela35d08452014-04-16 16:01:33 +0200538 .fields = (VMStateField[]) {
Andreas Färber259186a2013-01-17 18:51:17 +0100539 VMSTATE_UINT32(halted, CPUState),
540 VMSTATE_UINT32(interrupt_request, CPUState),
Juan Quintelae7f4eff2009-09-10 03:04:33 +0200541 VMSTATE_END_OF_LIST()
Pavel Dovgaluk6c3bff02014-07-31 09:41:17 +0400542 },
Juan Quintela5cd8cad2014-09-23 14:09:54 +0200543 .subsections = (const VMStateDescription*[]) {
544 &vmstate_cpu_common_exception_index,
Andrey Smetaninbac05aa2015-07-03 15:01:44 +0300545 &vmstate_cpu_common_crash_occurred,
Juan Quintela5cd8cad2014-09-23 14:09:54 +0200546 NULL
Juan Quintelae7f4eff2009-09-10 03:04:33 +0200547 }
548};
Andreas Färber1a1562f2013-06-17 04:09:11 +0200549
pbrook9656f322008-07-01 20:01:19 +0000550#endif
551
Andreas Färber38d8f5c2012-12-17 19:47:15 +0100552CPUState *qemu_get_cpu(int index)
Glauber Costa950f1472009-06-09 12:15:18 -0400553{
Andreas Färberbdc44642013-06-24 23:50:24 +0200554 CPUState *cpu;
Glauber Costa950f1472009-06-09 12:15:18 -0400555
Andreas Färberbdc44642013-06-24 23:50:24 +0200556 CPU_FOREACH(cpu) {
Andreas Färber55e5c282012-12-17 06:18:02 +0100557 if (cpu->cpu_index == index) {
Andreas Färberbdc44642013-06-24 23:50:24 +0200558 return cpu;
Andreas Färber55e5c282012-12-17 06:18:02 +0100559 }
Glauber Costa950f1472009-06-09 12:15:18 -0400560 }
561
Andreas Färberbdc44642013-06-24 23:50:24 +0200562 return NULL;
Glauber Costa950f1472009-06-09 12:15:18 -0400563}
564
Edgar E. Iglesias09daed82013-12-17 13:06:51 +1000565#if !defined(CONFIG_USER_ONLY)
Peter Maydell56943e82016-01-21 14:15:04 +0000566void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
Edgar E. Iglesias09daed82013-12-17 13:06:51 +1000567{
Peter Maydell12ebc9a2016-01-21 14:15:04 +0000568 CPUAddressSpace *newas;
569
570 /* Target code should have set num_ases before calling us */
571 assert(asidx < cpu->num_ases);
572
Peter Maydell56943e82016-01-21 14:15:04 +0000573 if (asidx == 0) {
574 /* address space 0 gets the convenience alias */
575 cpu->as = as;
576 }
577
Peter Maydell12ebc9a2016-01-21 14:15:04 +0000578 /* KVM cannot currently support multiple address spaces. */
579 assert(asidx == 0 || !kvm_enabled());
Edgar E. Iglesias09daed82013-12-17 13:06:51 +1000580
Peter Maydell12ebc9a2016-01-21 14:15:04 +0000581 if (!cpu->cpu_ases) {
582 cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
Edgar E. Iglesias09daed82013-12-17 13:06:51 +1000583 }
Peter Maydell32857f42015-10-01 15:29:50 +0100584
Peter Maydell12ebc9a2016-01-21 14:15:04 +0000585 newas = &cpu->cpu_ases[asidx];
586 newas->cpu = cpu;
587 newas->as = as;
Peter Maydell56943e82016-01-21 14:15:04 +0000588 if (tcg_enabled()) {
Peter Maydell12ebc9a2016-01-21 14:15:04 +0000589 newas->tcg_as_listener.commit = tcg_commit;
590 memory_listener_register(&newas->tcg_as_listener, as);
Peter Maydell56943e82016-01-21 14:15:04 +0000591 }
Edgar E. Iglesias09daed82013-12-17 13:06:51 +1000592}
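
/* Illustrative usage (not part of the original file): a target CPU's realize
 * code is expected to set num_ases before wiring up each address space, e.g.
 * for a CPU with a single address space backed by the global
 * address_space_memory declared above:
 *
 *     cs->num_ases = 1;
 *     cpu_address_space_init(cs, &address_space_memory, 0);
 */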

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
#endif

void cpu_exec_exit(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cpu_list_remove(cpu);

    if (cc->vmsd != NULL) {
        vmstate_unregister(NULL, cc->vmsd, cpu);
    }
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
    }
}

void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc ATTRIBUTE_UNUSED = CPU_GET_CLASS(cpu);
    Error *local_err ATTRIBUTE_UNUSED = NULL;

    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();

    /* This is a softmmu CPU object, so create a property for it
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
                             (Object **)&cpu->memory,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));
#endif

    cpu_list_add(cpu);

#ifndef CONFIG_USER_ONLY
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
    }
#endif
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    MemTxAttrs attrs;
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}

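/* A concrete illustration of the wrap-around concern above (illustrative
 * numbers only): with a 64-bit vaddr, a watchpoint at 0xfffffffffffffffc of
 * length 4 has wpend == UINT64_MAX, whereas a naive exclusive end computed as
 * wp->vaddr + wp->len would wrap to 0 and defeat the comparison; using
 * inclusive end addresses as above avoids that corner case.
 */
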
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *                  rcu_read_lock()
     *                  read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *                  rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    bool dirty = false;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
                                              offset, num);
        page += num;
    }

    rcu_read_unlock();

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}

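/* Illustrative arithmetic for the loop above (assuming the usual
 * DIRTY_MEMORY_BLOCK_SIZE of 256K pages): a range starting at page 262000
 * falls into block idx = 0 at offset = 262000, so the first iteration clears
 * at most 144 pages from that block before moving on to block 1 at offset 0.
 */
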
/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this. Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}

Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001058static uint16_t phys_section_add(PhysPageMap *map,
1059 MemoryRegionSection *section)
Avi Kivity5312bd82012-02-12 18:32:55 +02001060{
Paolo Bonzini68f3f652013-05-07 11:30:23 +02001061 /* The physical section number is ORed with a page-aligned
1062 * pointer to produce the iotlb entries. Thus it should
1063 * never overflow into the page-aligned value.
1064 */
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001065 assert(map->sections_nb < TARGET_PAGE_SIZE);
Paolo Bonzini68f3f652013-05-07 11:30:23 +02001066
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001067 if (map->sections_nb == map->sections_nb_alloc) {
1068 map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
1069 map->sections = g_renew(MemoryRegionSection, map->sections,
1070 map->sections_nb_alloc);
Avi Kivity5312bd82012-02-12 18:32:55 +02001071 }
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001072 map->sections[map->sections_nb] = *section;
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02001073 memory_region_ref(section->mr);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001074 return map->sections_nb++;
Avi Kivity5312bd82012-02-12 18:32:55 +02001075}
1076
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001077static void phys_section_destroy(MemoryRegion *mr)
1078{
Don Slutz55b4e802015-11-30 17:11:04 -05001079 bool have_sub_page = mr->subpage;
1080
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02001081 memory_region_unref(mr);
1082
Don Slutz55b4e802015-11-30 17:11:04 -05001083 if (have_sub_page) {
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001084 subpage_t *subpage = container_of(mr, subpage_t, iomem);
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001085 object_unref(OBJECT(&subpage->iomem));
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001086 g_free(subpage);
1087 }
1088}
1089
Paolo Bonzini60926662013-05-29 12:30:26 +02001090static void phys_sections_free(PhysPageMap *map)
Avi Kivity5312bd82012-02-12 18:32:55 +02001091{
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001092 while (map->sections_nb > 0) {
1093 MemoryRegionSection *section = &map->sections[--map->sections_nb];
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001094 phys_section_destroy(section->mr);
1095 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001096 g_free(map->sections);
1097 g_free(map->nodes);
Avi Kivity5312bd82012-02-12 18:32:55 +02001098}
1099
Avi Kivityac1970f2012-10-03 16:22:53 +02001100static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001101{
1102 subpage_t *subpage;
Avi Kivitya8170e52012-10-23 12:30:10 +02001103 hwaddr base = section->offset_within_address_space
Avi Kivity0f0cb162012-02-13 17:14:32 +02001104 & TARGET_PAGE_MASK;
Michael S. Tsirkin97115a82013-11-13 20:08:19 +02001105 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001106 d->map.nodes, d->map.sections);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001107 MemoryRegionSection subsection = {
1108 .offset_within_address_space = base,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001109 .size = int128_make64(TARGET_PAGE_SIZE),
Avi Kivity0f0cb162012-02-13 17:14:32 +02001110 };
Avi Kivitya8170e52012-10-23 12:30:10 +02001111 hwaddr start, end;
Avi Kivity0f0cb162012-02-13 17:14:32 +02001112
Avi Kivityf3705d52012-03-08 16:16:34 +02001113 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001114
Avi Kivityf3705d52012-03-08 16:16:34 +02001115 if (!(existing->mr->subpage)) {
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001116 subpage = subpage_init(d->as, base);
Edgar E. Iglesias3be91e82013-11-07 18:42:51 +01001117 subsection.address_space = d->as;
Avi Kivity0f0cb162012-02-13 17:14:32 +02001118 subsection.mr = &subpage->iomem;
Avi Kivityac1970f2012-10-03 16:22:53 +02001119 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001120 phys_section_add(&d->map, &subsection));
Avi Kivity0f0cb162012-02-13 17:14:32 +02001121 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02001122 subpage = container_of(existing->mr, subpage_t, iomem);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001123 }
1124 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001125 end = start + int128_get64(section->size) - 1;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001126 subpage_register(subpage, start, end,
1127 phys_section_add(&d->map, section));
Avi Kivity0f0cb162012-02-13 17:14:32 +02001128}
1129
1130
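/* Register a section that starts page-aligned and spans a whole number of
 * target pages: add it to the section table once and point every covered
 * page entry at that single section index.
 */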
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001131static void register_multipage(AddressSpaceDispatch *d,
1132 MemoryRegionSection *section)
bellard33417e72003-08-10 21:47:01 +00001133{
Avi Kivitya8170e52012-10-23 12:30:10 +02001134 hwaddr start_addr = section->offset_within_address_space;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001135 uint16_t section_index = phys_section_add(&d->map, section);
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001136 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1137 TARGET_PAGE_BITS));
Avi Kivitydd811242012-01-02 12:17:03 +02001138
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001139 assert(num_pages);
1140 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
bellard33417e72003-08-10 21:47:01 +00001141}
1142
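/* MemoryListener callback: split the incoming section into an unaligned
 * head, a run of whole target pages and an unaligned tail, registering
 * each piece either as a subpage or as a multipage mapping.
 */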
Avi Kivityac1970f2012-10-03 16:22:53 +02001143static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001144{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001145 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini00752702013-05-29 12:13:54 +02001146 AddressSpaceDispatch *d = as->next_dispatch;
Paolo Bonzini99b9cc02013-05-27 13:18:01 +02001147 MemoryRegionSection now = *section, remain = *section;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001148 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001149
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001150 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1151 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1152 - now.offset_within_address_space;
1153
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001154 now.size = int128_min(int128_make64(left), now.size);
Avi Kivityac1970f2012-10-03 16:22:53 +02001155 register_subpage(d, &now);
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001156 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001157 now.size = int128_zero();
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001158 }
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001159 while (int128_ne(remain.size, now.size)) {
1160 remain.size = int128_sub(remain.size, now.size);
1161 remain.offset_within_address_space += int128_get64(now.size);
1162 remain.offset_within_region += int128_get64(now.size);
Tyler Hall69b67642012-07-25 18:45:04 -04001163 now = remain;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001164 if (int128_lt(remain.size, page_size)) {
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001165 register_subpage(d, &now);
Hu Tao88266242013-08-29 18:21:16 +08001166 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001167 now.size = page_size;
Avi Kivityac1970f2012-10-03 16:22:53 +02001168 register_subpage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001169 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001170 now.size = int128_and(now.size, int128_neg(page_size));
Avi Kivityac1970f2012-10-03 16:22:53 +02001171 register_multipage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001172 }
Avi Kivity0f0cb162012-02-13 17:14:32 +02001173 }
1174}
1175
Sheng Yang62a27442010-01-26 19:21:16 +08001176void qemu_flush_coalesced_mmio_buffer(void)
1177{
1178 if (kvm_enabled())
1179 kvm_flush_coalesced_mmio_buffer();
1180}
1181
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001182void qemu_mutex_lock_ramlist(void)
1183{
1184 qemu_mutex_lock(&ram_list.mutex);
1185}
1186
1187void qemu_mutex_unlock_ramlist(void)
1188{
1189 qemu_mutex_unlock(&ram_list.mutex);
1190}
1191
Markus Armbrustere1e84ba2013-07-31 15:11:10 +02001192#ifdef __linux__
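/* Allocate guest RAM backed by a file (e.g. on hugetlbfs): open or create
 * a backing file at @path, grow it to the requested size with ftruncate()
 * and mmap() it with the alignment required by the file's page size.
 * A file created by this function is unlinked again if setup fails.
 */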
Alex Williamson04b16652010-07-02 11:13:17 -06001193static void *file_ram_alloc(RAMBlock *block,
1194 ram_addr_t memory,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001195 const char *path,
1196 Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001197{
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001198 bool unlink_on_error = false;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001199 char *filename;
Peter Feiner8ca761f2013-03-04 13:54:25 -05001200 char *sanitized_name;
1201 char *c;
Igor Mammedov056b68a2016-07-20 11:54:03 +02001202 void *area = MAP_FAILED;
Paolo Bonzini5c3ece72016-03-17 15:53:13 +01001203 int fd = -1;
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001204 int64_t page_size;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001205
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001206 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1207 error_setg(errp,
1208 "host lacks kvm mmu notifiers, -mem-path unsupported");
1209 return NULL;
1210 }
1211
1212 for (;;) {
1213 fd = open(path, O_RDWR);
1214 if (fd >= 0) {
1215 /* @path names an existing file, use it */
1216 break;
1217 }
1218 if (errno == ENOENT) {
1219 /* @path names a file that doesn't exist, create it */
1220 fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
1221 if (fd >= 0) {
1222 unlink_on_error = true;
1223 break;
1224 }
1225 } else if (errno == EISDIR) {
1226 /* @path names a directory, create a file there */
1227 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1228 sanitized_name = g_strdup(memory_region_name(block->mr));
1229 for (c = sanitized_name; *c != '\0'; c++) {
1230 if (*c == '/') {
1231 *c = '_';
1232 }
1233 }
1234
1235 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1236 sanitized_name);
1237 g_free(sanitized_name);
1238
1239 fd = mkstemp(filename);
1240 if (fd >= 0) {
1241 unlink(filename);
1242 g_free(filename);
1243 break;
1244 }
1245 g_free(filename);
1246 }
1247 if (errno != EEXIST && errno != EINTR) {
1248 error_setg_errno(errp, errno,
1249 "can't open backing store %s for guest RAM",
1250 path);
1251 goto error;
1252 }
1253 /*
1254 * Try again on EINTR and EEXIST. The latter happens when
1255 * something else creates the file between our two open().
1256 */
1257 }
1258
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001259 page_size = qemu_fd_getpagesize(fd);
Dominik Dingeld2f39ad2016-04-25 13:55:38 +02001260 block->mr->align = MAX(page_size, QEMU_VMALLOC_ALIGN);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001261
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001262 if (memory < page_size) {
Hu Tao557529d2014-09-09 13:28:00 +08001263 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001264 "or larger than page size 0x%" PRIx64,
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001265 memory, page_size);
Hu Tao557529d2014-09-09 13:28:00 +08001266 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001267 }
1268
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001269 memory = ROUND_UP(memory, page_size);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001270
1271 /*
1272 * ftruncate is not supported by hugetlbfs in older
1273 * hosts, so don't bother bailing out on errors.
1274 * If anything goes wrong with it under other filesystems,
1275 * mmap will fail.
1276 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001277 if (ftruncate(fd, memory)) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001278 perror("ftruncate");
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001279 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001280
Dominik Dingeld2f39ad2016-04-25 13:55:38 +02001281 area = qemu_ram_mmap(fd, memory, block->mr->align,
1282 block->flags & RAM_SHARED);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001283 if (area == MAP_FAILED) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001284 error_setg_errno(errp, errno,
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001285 "unable to map backing store for guest RAM");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001286 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001287 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001288
1289 if (mem_prealloc) {
Igor Mammedov056b68a2016-07-20 11:54:03 +02001290 os_mem_prealloc(fd, area, memory, errp);
1291 if (errp && *errp) {
1292 goto error;
1293 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001294 }
1295
Alex Williamson04b16652010-07-02 11:13:17 -06001296 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001297 return area;
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001298
1299error:
Igor Mammedov056b68a2016-07-20 11:54:03 +02001300 if (area != MAP_FAILED) {
1301 qemu_ram_munmap(area, memory);
1302 }
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001303 if (unlink_on_error) {
1304 unlink(path);
1305 }
Paolo Bonzini5c3ece72016-03-17 15:53:13 +01001306 if (fd != -1) {
1307 close(fd);
1308 }
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001309 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001310}
1311#endif
1312
Mike Day0dc3f442013-09-05 14:41:35 -04001313/* Called with the ramlist lock held. */
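/* Return the start of the smallest existing gap between blocks that can
 * hold @size bytes (best fit), aborting if no gap is large enough. */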
Alex Williamsond17b5282010-06-25 11:08:38 -06001314static ram_addr_t find_ram_offset(ram_addr_t size)
1315{
Alex Williamson04b16652010-07-02 11:13:17 -06001316 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001317 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001318
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001319 assert(size != 0); /* it would hand out same offset multiple times */
1320
Mike Day0dc3f442013-09-05 14:41:35 -04001321 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
Alex Williamson04b16652010-07-02 11:13:17 -06001322 return 0;
Mike Day0d53d9f2015-01-21 13:45:24 +01001323 }
Alex Williamson04b16652010-07-02 11:13:17 -06001324
Mike Day0dc3f442013-09-05 14:41:35 -04001325 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001326 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001327
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001328 end = block->offset + block->max_length;
Alex Williamson04b16652010-07-02 11:13:17 -06001329
Mike Day0dc3f442013-09-05 14:41:35 -04001330 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001331 if (next_block->offset >= end) {
1332 next = MIN(next, next_block->offset);
1333 }
1334 }
1335 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001336 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001337 mingap = next - end;
1338 }
1339 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001340
1341 if (offset == RAM_ADDR_MAX) {
1342 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1343 (uint64_t)size);
1344 abort();
1345 }
1346
Alex Williamson04b16652010-07-02 11:13:17 -06001347 return offset;
1348}
1349
Juan Quintela652d7ec2012-07-20 10:37:54 +02001350ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001351{
Alex Williamsond17b5282010-06-25 11:08:38 -06001352 RAMBlock *block;
1353 ram_addr_t last = 0;
1354
Mike Day0dc3f442013-09-05 14:41:35 -04001355 rcu_read_lock();
1356 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001357 last = MAX(last, block->offset + block->max_length);
Mike Day0d53d9f2015-01-21 13:45:24 +01001358 }
Mike Day0dc3f442013-09-05 14:41:35 -04001359 rcu_read_unlock();
Alex Williamsond17b5282010-06-25 11:08:38 -06001360 return last;
1361}
1362
Jason Baronddb97f12012-08-02 15:44:16 -04001363static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1364{
1365 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001366
 1367 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
Marcel Apfelbaum47c8ca52015-02-04 17:43:54 +02001368 if (!machine_dump_guest_core(current_machine)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001369 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1370 if (ret) {
1371 perror("qemu_madvise");
1372 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1373 "but dump_guest_core=off specified\n");
1374 }
1375 }
1376}
1377
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001378const char *qemu_ram_get_idstr(RAMBlock *rb)
1379{
1380 return rb->idstr;
1381}
1382
Mike Dayae3a7042013-09-05 14:41:35 -04001383/* Called with iothread lock held. */
Gongleifa53a0e2016-05-10 10:04:59 +08001384void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
Hu Tao20cfe882014-04-02 15:13:26 +08001385{
Gongleifa53a0e2016-05-10 10:04:59 +08001386 RAMBlock *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001387
Avi Kivityc5705a72011-12-20 15:59:12 +02001388 assert(new_block);
1389 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001390
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001391 if (dev) {
1392 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001393 if (id) {
1394 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001395 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001396 }
1397 }
1398 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1399
Gongleiab0a9952016-05-10 10:05:00 +08001400 rcu_read_lock();
Mike Day0dc3f442013-09-05 14:41:35 -04001401 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Gongleifa53a0e2016-05-10 10:04:59 +08001402 if (block != new_block &&
1403 !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001404 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1405 new_block->idstr);
1406 abort();
1407 }
1408 }
Mike Day0dc3f442013-09-05 14:41:35 -04001409 rcu_read_unlock();
Avi Kivityc5705a72011-12-20 15:59:12 +02001410}
1411
Mike Dayae3a7042013-09-05 14:41:35 -04001412/* Called with iothread lock held. */
Gongleifa53a0e2016-05-10 10:04:59 +08001413void qemu_ram_unset_idstr(RAMBlock *block)
Hu Tao20cfe882014-04-02 15:13:26 +08001414{
Mike Dayae3a7042013-09-05 14:41:35 -04001415 /* FIXME: arch_init.c assumes that this is not called during
1416 * migration. Ignore the problem since hot-unplug during migration
1417 * does not work anyway.
1418 */
Hu Tao20cfe882014-04-02 15:13:26 +08001419 if (block) {
1420 memset(block->idstr, 0, sizeof(block->idstr));
1421 }
1422}
1423
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001424static int memory_try_enable_merging(void *addr, size_t len)
1425{
Marcel Apfelbaum75cc7f02015-02-04 17:43:55 +02001426 if (!machine_mem_merge(current_machine)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001427 /* disabled by the user */
1428 return 0;
1429 }
1430
1431 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1432}
1433
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001434/* Only legal before the guest may have detected the memory size: e.g. on
1435 * incoming migration, or right after reset.
1436 *
 1437 * As the memory core doesn't know how memory is accessed, it is up to the
1438 * resize callback to update device state and/or add assertions to detect
1439 * misuse, if necessary.
1440 */
Gongleifa53a0e2016-05-10 10:04:59 +08001441int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001442{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001443 assert(block);
1444
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001445 newsize = HOST_PAGE_ALIGN(newsize);
Michael S. Tsirkin129ddaf2015-02-17 10:15:30 +01001446
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001447 if (block->used_length == newsize) {
1448 return 0;
1449 }
1450
1451 if (!(block->flags & RAM_RESIZEABLE)) {
1452 error_setg_errno(errp, EINVAL,
1453 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1454 " in != 0x" RAM_ADDR_FMT, block->idstr,
1455 newsize, block->used_length);
1456 return -EINVAL;
1457 }
1458
1459 if (block->max_length < newsize) {
1460 error_setg_errno(errp, EINVAL,
1461 "Length too large: %s: 0x" RAM_ADDR_FMT
1462 " > 0x" RAM_ADDR_FMT, block->idstr,
1463 newsize, block->max_length);
1464 return -EINVAL;
1465 }
1466
1467 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1468 block->used_length = newsize;
Paolo Bonzini58d27072015-03-23 11:56:01 +01001469 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1470 DIRTY_CLIENTS_ALL);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001471 memory_region_set_size(block->mr, newsize);
1472 if (block->resized) {
1473 block->resized(block->idstr, newsize, block->host);
1474 }
1475 return 0;
1476}
1477
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001478/* Called with ram_list.mutex held */
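/* Grow each dirty-memory bitmap to cover @new_ram_size pages using RCU
 * copy-and-replace: allocate a larger DirtyMemoryBlocks array, copy the
 * existing block pointers, append zero-initialised bitmaps for the new
 * range, publish the new array and reclaim the old one after a grace
 * period. */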
1479static void dirty_memory_extend(ram_addr_t old_ram_size,
1480 ram_addr_t new_ram_size)
1481{
1482 ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
1483 DIRTY_MEMORY_BLOCK_SIZE);
1484 ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
1485 DIRTY_MEMORY_BLOCK_SIZE);
1486 int i;
1487
1488 /* Only need to extend if block count increased */
1489 if (new_num_blocks <= old_num_blocks) {
1490 return;
1491 }
1492
1493 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1494 DirtyMemoryBlocks *old_blocks;
1495 DirtyMemoryBlocks *new_blocks;
1496 int j;
1497
1498 old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
1499 new_blocks = g_malloc(sizeof(*new_blocks) +
1500 sizeof(new_blocks->blocks[0]) * new_num_blocks);
1501
1502 if (old_num_blocks) {
1503 memcpy(new_blocks->blocks, old_blocks->blocks,
1504 old_num_blocks * sizeof(old_blocks->blocks[0]));
1505 }
1506
1507 for (j = old_num_blocks; j < new_num_blocks; j++) {
1508 new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
1509 }
1510
1511 atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
1512
1513 if (old_blocks) {
1514 g_free_rcu(old_blocks, rcu);
1515 }
1516 }
1517}
1518
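/* Insert a prepared RAMBlock into the global list: pick an offset, allocate
 * the host memory if the caller did not provide it (via Xen or
 * phys_mem_alloc), grow the dirty bitmaps, keep ram_list.blocks sorted by
 * decreasing max_length, and mark the new range dirty for all clients. */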
Fam Zheng528f46a2016-03-01 14:18:18 +08001519static void ram_block_add(RAMBlock *new_block, Error **errp)
Avi Kivityc5705a72011-12-20 15:59:12 +02001520{
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001521 RAMBlock *block;
Mike Day0d53d9f2015-01-21 13:45:24 +01001522 RAMBlock *last_block = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001523 ram_addr_t old_ram_size, new_ram_size;
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001524 Error *err = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001525
1526 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001527
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001528 qemu_mutex_lock_ramlist();
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001529 new_block->offset = find_ram_offset(new_block->max_length);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001530
1531 if (!new_block->host) {
1532 if (xen_enabled()) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001533 xen_ram_alloc(new_block->offset, new_block->max_length,
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001534 new_block->mr, &err);
1535 if (err) {
1536 error_propagate(errp, err);
1537 qemu_mutex_unlock_ramlist();
Paolo Bonzini39c350e2016-03-09 18:14:01 +01001538 return;
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001539 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001540 } else {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001541 new_block->host = phys_mem_alloc(new_block->max_length,
Igor Mammedova2b257d2014-10-31 16:38:37 +00001542 &new_block->mr->align);
Markus Armbruster39228252013-07-31 15:11:11 +02001543 if (!new_block->host) {
Hu Taoef701d72014-09-09 13:27:54 +08001544 error_setg_errno(errp, errno,
1545 "cannot set up guest memory '%s'",
1546 memory_region_name(new_block->mr));
1547 qemu_mutex_unlock_ramlist();
Paolo Bonzini39c350e2016-03-09 18:14:01 +01001548 return;
Markus Armbruster39228252013-07-31 15:11:11 +02001549 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001550 memory_try_enable_merging(new_block->host, new_block->max_length);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001551 }
1552 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001553
Li Zhijiandd631692015-07-02 20:18:06 +08001554 new_ram_size = MAX(old_ram_size,
1555 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1556 if (new_ram_size > old_ram_size) {
1557 migration_bitmap_extend(old_ram_size, new_ram_size);
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001558 dirty_memory_extend(old_ram_size, new_ram_size);
Li Zhijiandd631692015-07-02 20:18:06 +08001559 }
Mike Day0d53d9f2015-01-21 13:45:24 +01001560 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1561 * QLIST (which has an RCU-friendly variant) does not have insertion at
1562 * tail, so save the last element in last_block.
1563 */
Mike Day0dc3f442013-09-05 14:41:35 -04001564 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Mike Day0d53d9f2015-01-21 13:45:24 +01001565 last_block = block;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001566 if (block->max_length < new_block->max_length) {
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001567 break;
1568 }
1569 }
1570 if (block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001571 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001572 } else if (last_block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001573 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001574 } else { /* list is empty */
Mike Day0dc3f442013-09-05 14:41:35 -04001575 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001576 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001577 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001578
Mike Day0dc3f442013-09-05 14:41:35 -04001579 /* Write list before version */
1580 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001581 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001582 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001583
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001584 cpu_physical_memory_set_dirty_range(new_block->offset,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001585 new_block->used_length,
1586 DIRTY_CLIENTS_ALL);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001587
Paolo Bonzinia904c912015-01-21 16:18:35 +01001588 if (new_block->host) {
1589 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1590 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
Cao jinc2cd6272016-09-12 14:34:56 +08001591 /* MADV_DONTFORK is also needed by KVM in the absence of a synchronous MMU */
Paolo Bonzinia904c912015-01-21 16:18:35 +01001592 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001593 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001594}
1595
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001596#ifdef __linux__
Fam Zheng528f46a2016-03-01 14:18:18 +08001597RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
1598 bool share, const char *mem_path,
1599 Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001600{
1601 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001602 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001603
1604 if (xen_enabled()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001605 error_setg(errp, "-mem-path not supported with Xen");
Fam Zheng528f46a2016-03-01 14:18:18 +08001606 return NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001607 }
1608
1609 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1610 /*
1611 * file_ram_alloc() needs to allocate just like
1612 * phys_mem_alloc, but we haven't bothered to provide
1613 * a hook there.
1614 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001615 error_setg(errp,
1616 "-mem-path not supported with this accelerator");
Fam Zheng528f46a2016-03-01 14:18:18 +08001617 return NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001618 }
1619
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001620 size = HOST_PAGE_ALIGN(size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001621 new_block = g_malloc0(sizeof(*new_block));
1622 new_block->mr = mr;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001623 new_block->used_length = size;
1624 new_block->max_length = size;
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001625 new_block->flags = share ? RAM_SHARED : 0;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001626 new_block->host = file_ram_alloc(new_block, size,
1627 mem_path, errp);
1628 if (!new_block->host) {
1629 g_free(new_block);
Fam Zheng528f46a2016-03-01 14:18:18 +08001630 return NULL;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001631 }
1632
Fam Zheng528f46a2016-03-01 14:18:18 +08001633 ram_block_add(new_block, &local_err);
Hu Taoef701d72014-09-09 13:27:54 +08001634 if (local_err) {
1635 g_free(new_block);
1636 error_propagate(errp, local_err);
Fam Zheng528f46a2016-03-01 14:18:18 +08001637 return NULL;
Hu Taoef701d72014-09-09 13:27:54 +08001638 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001639 return new_block;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001640}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001641#endif
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001642
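/* Common back end for qemu_ram_alloc(), qemu_ram_alloc_from_ptr() and
 * qemu_ram_alloc_resizeable(): round the sizes up to the host page size,
 * fill in a new RAMBlock (RAM_PREALLOC when a host pointer is supplied,
 * RAM_RESIZEABLE for resizeable blocks) and hand it to ram_block_add(). */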
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001643static
Fam Zheng528f46a2016-03-01 14:18:18 +08001644RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1645 void (*resized)(const char*,
1646 uint64_t length,
1647 void *host),
1648 void *host, bool resizeable,
1649 MemoryRegion *mr, Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001650{
1651 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001652 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001653
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001654 size = HOST_PAGE_ALIGN(size);
1655 max_size = HOST_PAGE_ALIGN(max_size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001656 new_block = g_malloc0(sizeof(*new_block));
1657 new_block->mr = mr;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001658 new_block->resized = resized;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001659 new_block->used_length = size;
1660 new_block->max_length = max_size;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001661 assert(max_size >= size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001662 new_block->fd = -1;
1663 new_block->host = host;
1664 if (host) {
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001665 new_block->flags |= RAM_PREALLOC;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001666 }
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001667 if (resizeable) {
1668 new_block->flags |= RAM_RESIZEABLE;
1669 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001670 ram_block_add(new_block, &local_err);
Hu Taoef701d72014-09-09 13:27:54 +08001671 if (local_err) {
1672 g_free(new_block);
1673 error_propagate(errp, local_err);
Fam Zheng528f46a2016-03-01 14:18:18 +08001674 return NULL;
Hu Taoef701d72014-09-09 13:27:54 +08001675 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001676 return new_block;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001677}
1678
Fam Zheng528f46a2016-03-01 14:18:18 +08001679RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001680 MemoryRegion *mr, Error **errp)
1681{
1682 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1683}
1684
Fam Zheng528f46a2016-03-01 14:18:18 +08001685RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
pbrook94a6b542009-04-11 17:15:54 +00001686{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001687 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1688}
1689
Fam Zheng528f46a2016-03-01 14:18:18 +08001690RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001691 void (*resized)(const char*,
1692 uint64_t length,
1693 void *host),
1694 MemoryRegion *mr, Error **errp)
1695{
1696 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
pbrook94a6b542009-04-11 17:15:54 +00001697}
bellarde9a1ab12007-02-08 23:08:38 +00001698
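/* RCU reclaim function used by qemu_ram_free(): release the host memory in
 * whatever way it was obtained (preallocated by the caller, Xen map cache,
 * file-backed mmap or anonymous allocation) and free the RAMBlock itself. */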
Paolo Bonzini43771532013-09-09 17:58:40 +02001699static void reclaim_ramblock(RAMBlock *block)
1700{
1701 if (block->flags & RAM_PREALLOC) {
1702 ;
1703 } else if (xen_enabled()) {
1704 xen_invalidate_map_cache_entry(block->host);
1705#ifndef _WIN32
1706 } else if (block->fd >= 0) {
Eduardo Habkost2f3a2bb2015-11-06 20:11:21 -02001707 qemu_ram_munmap(block->host, block->max_length);
Paolo Bonzini43771532013-09-09 17:58:40 +02001708 close(block->fd);
1709#endif
1710 } else {
1711 qemu_anon_ram_free(block->host, block->max_length);
1712 }
1713 g_free(block);
1714}
1715
Fam Zhengf1060c52016-03-01 14:18:22 +08001716void qemu_ram_free(RAMBlock *block)
bellarde9a1ab12007-02-08 23:08:38 +00001717{
Marc-André Lureau85bc2a12016-03-29 13:20:51 +02001718 if (!block) {
1719 return;
1720 }
1721
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001722 qemu_mutex_lock_ramlist();
Fam Zhengf1060c52016-03-01 14:18:22 +08001723 QLIST_REMOVE_RCU(block, next);
1724 ram_list.mru_block = NULL;
1725 /* Write list before version */
1726 smp_wmb();
1727 ram_list.version++;
1728 call_rcu(block, reclaim_ramblock, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001729 qemu_mutex_unlock_ramlist();
bellarde9a1ab12007-02-08 23:08:38 +00001730}
1731
Huang Yingcd19cfa2011-03-02 08:56:19 +01001732#ifndef _WIN32
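/* Replace the host mapping of [addr, addr + length) with a fresh one at the
 * same virtual address: mmap() again with MAP_FIXED, file-backed or
 * anonymous to match the original allocation, then reapply the merge and
 * dump hints. Preallocated blocks are skipped; Xen mappings are not
 * supported here. */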
1733void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1734{
1735 RAMBlock *block;
1736 ram_addr_t offset;
1737 int flags;
1738 void *area, *vaddr;
1739
Mike Day0dc3f442013-09-05 14:41:35 -04001740 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001741 offset = addr - block->offset;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001742 if (offset < block->max_length) {
Michael S. Tsirkin1240be22014-11-12 11:44:41 +02001743 vaddr = ramblock_ptr(block, offset);
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001744 if (block->flags & RAM_PREALLOC) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001745 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001746 } else if (xen_enabled()) {
1747 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001748 } else {
1749 flags = MAP_FIXED;
Markus Armbruster3435f392013-07-31 15:11:07 +02001750 if (block->fd >= 0) {
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001751 flags |= (block->flags & RAM_SHARED ?
1752 MAP_SHARED : MAP_PRIVATE);
Markus Armbruster3435f392013-07-31 15:11:07 +02001753 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1754 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001755 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001756 /*
1757 * Remap needs to match alloc. Accelerators that
1758 * set phys_mem_alloc never remap. If they did,
1759 * we'd need a remap hook here.
1760 */
1761 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1762
Huang Yingcd19cfa2011-03-02 08:56:19 +01001763 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1764 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1765 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001766 }
1767 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001768 fprintf(stderr, "Could not remap addr: "
1769 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001770 length, addr);
1771 exit(1);
1772 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001773 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001774 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001775 }
Huang Yingcd19cfa2011-03-02 08:56:19 +01001776 }
1777 }
1778}
1779#endif /* !_WIN32 */
1780
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001781/* Return a host pointer to ram allocated with qemu_ram_alloc.
Mike Dayae3a7042013-09-05 14:41:35 -04001782 * This should not be used for general purpose DMA. Use address_space_map
1783 * or address_space_rw instead. For local memory (e.g. video ram) that the
1784 * device owns, use memory_region_get_ram_ptr.
Mike Day0dc3f442013-09-05 14:41:35 -04001785 *
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001786 * Called within RCU critical section.
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001787 */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001788void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001789{
Gonglei3655cb92016-02-20 10:35:20 +08001790 RAMBlock *block = ram_block;
1791
1792 if (block == NULL) {
1793 block = qemu_get_ram_block(addr);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001794 addr -= block->offset;
Gonglei3655cb92016-02-20 10:35:20 +08001795 }
Mike Dayae3a7042013-09-05 14:41:35 -04001796
1797 if (xen_enabled() && block->host == NULL) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001798 /* We need to check if the requested address is in the RAM
1799 * because we don't want to map the entire memory in QEMU.
1800 * In that case just map until the end of the page.
1801 */
1802 if (block->offset == 0) {
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001803 return xen_map_cache(addr, 0, 0);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001804 }
Mike Dayae3a7042013-09-05 14:41:35 -04001805
1806 block->host = xen_map_cache(block->offset, block->max_length, 1);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001807 }
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001808 return ramblock_ptr(block, addr);
pbrookdc828ca2009-04-09 22:21:07 +00001809}
1810
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001811/* Return a host pointer to guest's ram. Similar to qemu_map_ram_ptr
Mike Dayae3a7042013-09-05 14:41:35 -04001812 * but takes a size argument.
Mike Day0dc3f442013-09-05 14:41:35 -04001813 *
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001814 * Called within RCU critical section.
Mike Dayae3a7042013-09-05 14:41:35 -04001815 */
Gonglei3655cb92016-02-20 10:35:20 +08001816static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
1817 hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001818{
Gonglei3655cb92016-02-20 10:35:20 +08001819 RAMBlock *block = ram_block;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001820 if (*size == 0) {
1821 return NULL;
1822 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001823
Gonglei3655cb92016-02-20 10:35:20 +08001824 if (block == NULL) {
1825 block = qemu_get_ram_block(addr);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001826 addr -= block->offset;
Gonglei3655cb92016-02-20 10:35:20 +08001827 }
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001828 *size = MIN(*size, block->max_length - addr);
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001829
1830 if (xen_enabled() && block->host == NULL) {
1831 /* We need to check if the requested address is in the RAM
1832 * because we don't want to map the entire memory in QEMU.
1833 * In that case just map the requested area.
1834 */
1835 if (block->offset == 0) {
1836 return xen_map_cache(addr, *size, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001837 }
1838
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001839 block->host = xen_map_cache(block->offset, block->max_length, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001840 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001841
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001842 return ramblock_ptr(block, addr);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001843}
1844
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001845/*
1846 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1847 * in that RAMBlock.
1848 *
1849 * ptr: Host pointer to look up
1850 * round_offset: If true round the result offset down to a page boundary
1851 * *ram_addr: set to result ram_addr
1852 * *offset: set to result offset within the RAMBlock
1853 *
1854 * Returns: RAMBlock (or NULL if not found)
Mike Dayae3a7042013-09-05 14:41:35 -04001855 *
1856 * By the time this function returns, the returned pointer is not protected
1857 * by RCU anymore. If the caller is not within an RCU critical section and
1858 * does not hold the iothread lock, it must have other means of protecting the
1859 * pointer, such as a reference to the region that includes the incoming
1860 * ram_addr_t.
1861 */
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001862RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001863 ram_addr_t *offset)
pbrook5579c7f2009-04-11 14:47:08 +00001864{
pbrook94a6b542009-04-11 17:15:54 +00001865 RAMBlock *block;
1866 uint8_t *host = ptr;
1867
Jan Kiszka868bb332011-06-21 22:59:09 +02001868 if (xen_enabled()) {
Paolo Bonzinif615f392016-05-26 10:07:50 +02001869 ram_addr_t ram_addr;
Mike Day0dc3f442013-09-05 14:41:35 -04001870 rcu_read_lock();
Paolo Bonzinif615f392016-05-26 10:07:50 +02001871 ram_addr = xen_ram_addr_from_mapcache(ptr);
1872 block = qemu_get_ram_block(ram_addr);
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001873 if (block) {
Anthony PERARDd6b6aec2016-06-09 16:56:17 +01001874 *offset = ram_addr - block->offset;
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001875 }
Mike Day0dc3f442013-09-05 14:41:35 -04001876 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001877 return block;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001878 }
1879
Mike Day0dc3f442013-09-05 14:41:35 -04001880 rcu_read_lock();
1881 block = atomic_rcu_read(&ram_list.mru_block);
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001882 if (block && block->host && host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001883 goto found;
1884 }
1885
Mike Day0dc3f442013-09-05 14:41:35 -04001886 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001887 /* This case happens when the block is not mapped. */
1888 if (block->host == NULL) {
1889 continue;
1890 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001891 if (host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001892 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001893 }
pbrook94a6b542009-04-11 17:15:54 +00001894 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001895
Mike Day0dc3f442013-09-05 14:41:35 -04001896 rcu_read_unlock();
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001897 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001898
1899found:
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001900 *offset = (host - block->host);
1901 if (round_offset) {
1902 *offset &= TARGET_PAGE_MASK;
1903 }
Mike Day0dc3f442013-09-05 14:41:35 -04001904 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001905 return block;
1906}
1907
Dr. David Alan Gilberte3dd7492015-11-05 18:10:33 +00001908/*
1909 * Finds the named RAMBlock
1910 *
1911 * name: The name of RAMBlock to find
1912 *
1913 * Returns: RAMBlock (or NULL if not found)
1914 */
1915RAMBlock *qemu_ram_block_by_name(const char *name)
1916{
1917 RAMBlock *block;
1918
1919 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1920 if (!strcmp(name, block->idstr)) {
1921 return block;
1922 }
1923 }
1924
1925 return NULL;
1926}
1927
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001928/* Some of the softmmu routines need to translate from a host pointer
1929 (typically a TLB entry) back to a ram offset. */
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01001930ram_addr_t qemu_ram_addr_from_host(void *ptr)
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001931{
1932 RAMBlock *block;
Paolo Bonzinif615f392016-05-26 10:07:50 +02001933 ram_addr_t offset;
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001934
Paolo Bonzinif615f392016-05-26 10:07:50 +02001935 block = qemu_ram_block_from_host(ptr, false, &offset);
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001936 if (!block) {
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01001937 return RAM_ADDR_INVALID;
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001938 }
1939
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01001940 return block->offset + offset;
Marcelo Tosattie8902612010-10-11 15:31:19 -03001941}
Alex Williamsonf471a172010-06-11 11:11:42 -06001942
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001943/* Called within RCU critical section. */
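/* Write handler for pages whose code-dirty bit is still clear: invalidate
 * any translated code for the page, perform the store into RAM, mark the
 * range dirty for the VGA and migration clients, and let the TLB entry go
 * back to a plain RAM mapping once the page is no longer clean. */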
Avi Kivitya8170e52012-10-23 12:30:10 +02001944static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001945 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001946{
Juan Quintela52159192013-10-08 12:44:04 +02001947 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001948 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00001949 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001950 switch (size) {
1951 case 1:
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001952 stb_p(qemu_map_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001953 break;
1954 case 2:
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001955 stw_p(qemu_map_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001956 break;
1957 case 4:
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001958 stl_p(qemu_map_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001959 break;
1960 default:
1961 abort();
1962 }
Paolo Bonzini58d27072015-03-23 11:56:01 +01001963 /* Set both VGA and migration bits for simplicity and to remove
1964 * the notdirty callback faster.
1965 */
1966 cpu_physical_memory_set_dirty_range(ram_addr, size,
1967 DIRTY_CLIENTS_NOCODE);
bellardf23db162005-08-21 19:12:28 +00001968 /* we remove the notdirty callback only if the code has been
1969 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02001970 if (!cpu_physical_memory_is_clean(ram_addr)) {
Peter Crosthwaitebcae01e2015-09-10 22:39:42 -07001971 tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
Andreas Färber4917cf42013-05-27 05:17:50 +02001972 }
bellard1ccde1c2004-02-06 19:46:14 +00001973}
1974
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001975static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1976 unsigned size, bool is_write)
1977{
1978 return is_write;
1979}
1980
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001981static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001982 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001983 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001984 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00001985};
1986
pbrook0f459d12008-06-09 00:20:13 +00001987/* Generate a debug exception if a watchpoint has been hit. */
Peter Maydell66b9b432015-04-26 16:49:24 +01001988static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
pbrook0f459d12008-06-09 00:20:13 +00001989{
Andreas Färber93afead2013-08-26 03:41:01 +02001990 CPUState *cpu = current_cpu;
Sergey Fedorov568496c2016-02-11 11:17:32 +00001991 CPUClass *cc = CPU_GET_CLASS(cpu);
Andreas Färber93afead2013-08-26 03:41:01 +02001992 CPUArchState *env = cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00001993 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00001994 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00001995 CPUWatchpoint *wp;
Emilio G. Cota89fee742016-04-07 13:19:22 -04001996 uint32_t cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00001997
Andreas Färberff4700b2013-08-26 18:23:18 +02001998 if (cpu->watchpoint_hit) {
aliguori06d55cc2008-11-18 20:24:06 +00001999 /* We re-entered the check after replacing the TB. Now raise
 2000 * the debug interrupt so that it will trigger after the
2001 * current instruction. */
Andreas Färber93afead2013-08-26 03:41:01 +02002002 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00002003 return;
2004 }
Andreas Färber93afead2013-08-26 03:41:01 +02002005 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Andreas Färberff4700b2013-08-26 18:23:18 +02002006 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Peter Maydell05068c02014-09-12 14:06:48 +01002007 if (cpu_watchpoint_address_matches(wp, vaddr, len)
2008 && (wp->flags & flags)) {
Peter Maydell08225672014-09-12 14:06:48 +01002009 if (flags == BP_MEM_READ) {
2010 wp->flags |= BP_WATCHPOINT_HIT_READ;
2011 } else {
2012 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
2013 }
2014 wp->hitaddr = vaddr;
Peter Maydell66b9b432015-04-26 16:49:24 +01002015 wp->hitattrs = attrs;
Andreas Färberff4700b2013-08-26 18:23:18 +02002016 if (!cpu->watchpoint_hit) {
Sergey Fedorov568496c2016-02-11 11:17:32 +00002017 if (wp->flags & BP_CPU &&
2018 !cc->debug_check_watchpoint(cpu, wp)) {
2019 wp->flags &= ~BP_WATCHPOINT_HIT;
2020 continue;
2021 }
Andreas Färberff4700b2013-08-26 18:23:18 +02002022 cpu->watchpoint_hit = wp;
Andreas Färber239c51a2013-09-01 17:12:23 +02002023 tb_check_watchpoint(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002024 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
Andreas Färber27103422013-08-26 08:31:06 +02002025 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +02002026 cpu_loop_exit(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002027 } else {
2028 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
Andreas Färber648f0342013-09-01 17:43:17 +02002029 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
Peter Maydell6886b982016-05-17 15:18:04 +01002030 cpu_loop_exit_noexc(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002031 }
aliguori06d55cc2008-11-18 20:24:06 +00002032 }
aliguori6e140f22008-11-18 20:37:55 +00002033 } else {
2034 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00002035 }
2036 }
2037}
2038
pbrook6658ffb2007-03-16 23:58:11 +00002039/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2040 so these check for a hit then pass through to the normal out-of-line
2041 phys routines. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002042static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
2043 unsigned size, MemTxAttrs attrs)
pbrook6658ffb2007-03-16 23:58:11 +00002044{
Peter Maydell66b9b432015-04-26 16:49:24 +01002045 MemTxResult res;
2046 uint64_t data;
Peter Maydell79ed0412016-01-21 14:15:06 +00002047 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2048 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
pbrook6658ffb2007-03-16 23:58:11 +00002049
Peter Maydell66b9b432015-04-26 16:49:24 +01002050 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
Avi Kivity1ec9b902012-01-02 12:47:48 +02002051 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04002052 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002053 data = address_space_ldub(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002054 break;
2055 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002056 data = address_space_lduw(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002057 break;
2058 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002059 data = address_space_ldl(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002060 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02002061 default: abort();
2062 }
Peter Maydell66b9b432015-04-26 16:49:24 +01002063 *pdata = data;
2064 return res;
2065}
2066
2067static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
2068 uint64_t val, unsigned size,
2069 MemTxAttrs attrs)
2070{
2071 MemTxResult res;
Peter Maydell79ed0412016-01-21 14:15:06 +00002072 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2073 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
Peter Maydell66b9b432015-04-26 16:49:24 +01002074
2075 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
2076 switch (size) {
2077 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002078 address_space_stb(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002079 break;
2080 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002081 address_space_stw(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002082 break;
2083 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002084 address_space_stl(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002085 break;
2086 default: abort();
2087 }
2088 return res;
pbrook6658ffb2007-03-16 23:58:11 +00002089}
2090
Avi Kivity1ec9b902012-01-02 12:47:48 +02002091static const MemoryRegionOps watch_mem_ops = {
Peter Maydell66b9b432015-04-26 16:49:24 +01002092 .read_with_attrs = watch_mem_read,
2093 .write_with_attrs = watch_mem_write,
Avi Kivity1ec9b902012-01-02 12:47:48 +02002094 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00002095};
pbrook6658ffb2007-03-16 23:58:11 +00002096
Peter Maydellf25a49e2015-04-26 16:49:24 +01002097static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2098 unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002099{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002100 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002101 uint8_t buf[8];
Peter Maydell5c9eb022015-04-26 16:49:24 +01002102 MemTxResult res;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002103
blueswir1db7b5422007-05-26 17:36:03 +00002104#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002105 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002106 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00002107#endif
Peter Maydell5c9eb022015-04-26 16:49:24 +01002108 res = address_space_read(subpage->as, addr + subpage->base,
2109 attrs, buf, len);
2110 if (res) {
2111 return res;
Peter Maydellf25a49e2015-04-26 16:49:24 +01002112 }
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002113 switch (len) {
2114 case 1:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002115 *data = ldub_p(buf);
2116 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002117 case 2:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002118 *data = lduw_p(buf);
2119 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002120 case 4:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002121 *data = ldl_p(buf);
2122 return MEMTX_OK;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002123 case 8:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002124 *data = ldq_p(buf);
2125 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002126 default:
2127 abort();
2128 }
blueswir1db7b5422007-05-26 17:36:03 +00002129}
2130
Peter Maydellf25a49e2015-04-26 16:49:24 +01002131static MemTxResult subpage_write(void *opaque, hwaddr addr,
2132 uint64_t value, unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002133{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002134 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002135 uint8_t buf[8];
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002136
blueswir1db7b5422007-05-26 17:36:03 +00002137#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002138 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002139 " value %"PRIx64"\n",
2140 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00002141#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002142 switch (len) {
2143 case 1:
2144 stb_p(buf, value);
2145 break;
2146 case 2:
2147 stw_p(buf, value);
2148 break;
2149 case 4:
2150 stl_p(buf, value);
2151 break;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002152 case 8:
2153 stq_p(buf, value);
2154 break;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002155 default:
2156 abort();
2157 }
Peter Maydell5c9eb022015-04-26 16:49:24 +01002158 return address_space_write(subpage->as, addr + subpage->base,
2159 attrs, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00002160}
2161
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002162static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08002163 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002164{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002165 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002166#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002167 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002168 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002169#endif
2170
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002171 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08002172 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002173}
2174
Avi Kivity70c68e42012-01-02 12:32:48 +02002175static const MemoryRegionOps subpage_ops = {
Peter Maydellf25a49e2015-04-26 16:49:24 +01002176 .read_with_attrs = subpage_read,
2177 .write_with_attrs = subpage_write,
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002178 .impl.min_access_size = 1,
2179 .impl.max_access_size = 8,
2180 .valid.min_access_size = 1,
2181 .valid.max_access_size = 8,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002182 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02002183 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00002184};
2185
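/* Map the sub-page indices covering [start, end] inside this page to the
 * given section number, so accesses to that byte range are dispatched
 * through it. */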
Anthony Liguoric227f092009-10-01 16:12:16 -05002186static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002187 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00002188{
2189 int idx, eidx;
2190
2191 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2192 return -1;
2193 idx = SUBPAGE_IDX(start);
2194 eidx = SUBPAGE_IDX(end);
2195#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002196 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2197 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00002198#endif
blueswir1db7b5422007-05-26 17:36:03 +00002199 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02002200 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00002201 }
2202
2203 return 0;
2204}
2205
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002206static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00002207{
Anthony Liguoric227f092009-10-01 16:12:16 -05002208 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00002209
Anthony Liguori7267c092011-08-20 22:09:37 -05002210 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00002211
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002212 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00002213 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002214 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002215 NULL, TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02002216 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00002217#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002218 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2219 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00002220#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02002221 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00002222
2223 return mmio;
2224}
2225
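/* Build a catch-all MemoryRegionSection that spans the entire address space
 * with the given region and add it to the map; used below to create the
 * fixed PHYS_SECTION_UNASSIGNED/NOTDIRTY/ROM/WATCH entries. */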
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002226static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2227 MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02002228{
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002229 assert(as);
Avi Kivity5312bd82012-02-12 18:32:55 +02002230 MemoryRegionSection section = {
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002231 .address_space = as,
Avi Kivity5312bd82012-02-12 18:32:55 +02002232 .mr = mr,
2233 .offset_within_address_space = 0,
2234 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002235 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02002236 };
2237
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002238 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02002239}
2240
Peter Maydella54c87b2016-01-21 14:15:05 +00002241MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
Avi Kivityaa102232012-03-08 17:06:55 +02002242{
Peter Maydella54c87b2016-01-21 14:15:05 +00002243 int asidx = cpu_asidx_from_attrs(cpu, attrs);
2244 CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
Peter Maydell32857f42015-10-01 15:29:50 +01002245 AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002246 MemoryRegionSection *sections = d->map.sections;
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002247
2248 return sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02002249}
2250
Avi Kivitye9179ce2009-06-14 11:38:52 +03002251static void io_mem_init(void)
2252{
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002253 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002254 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002255 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002256 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002257 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002258 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002259 NULL, UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03002260}
2261
Avi Kivityac1970f2012-10-03 16:22:53 +02002262static void mem_begin(MemoryListener *listener)
2263{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002264 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002265 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2266 uint16_t n;
2267
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002268 n = dummy_section(&d->map, as, &io_mem_unassigned);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002269 assert(n == PHYS_SECTION_UNASSIGNED);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002270 n = dummy_section(&d->map, as, &io_mem_notdirty);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002271 assert(n == PHYS_SECTION_NOTDIRTY);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002272 n = dummy_section(&d->map, as, &io_mem_rom);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002273 assert(n == PHYS_SECTION_ROM);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002274 n = dummy_section(&d->map, as, &io_mem_watch);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002275 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02002276
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02002277 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02002278 d->as = as;
2279 as->next_dispatch = d;
2280}
2281
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002282static void address_space_dispatch_free(AddressSpaceDispatch *d)
2283{
2284 phys_sections_free(&d->map);
2285 g_free(d);
2286}
2287
Paolo Bonzini00752702013-05-29 12:13:54 +02002288static void mem_commit(MemoryListener *listener)
2289{
2290 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02002291 AddressSpaceDispatch *cur = as->dispatch;
2292 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02002293
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002294 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02002295
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002296 atomic_rcu_set(&as->dispatch, next);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002297 if (cur) {
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002298 call_rcu(cur, address_space_dispatch_free, rcu);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002299 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02002300}
2301
Avi Kivity1d711482012-10-02 18:54:45 +02002302static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02002303{
Peter Maydell32857f42015-10-01 15:29:50 +01002304 CPUAddressSpace *cpuas;
2305 AddressSpaceDispatch *d;
Avi Kivity117712c2012-02-12 21:23:17 +02002306
 2307    /* Since each CPU stores RAM addresses in its TLB cache, we must
 2308       reset the modified entries */
Peter Maydell32857f42015-10-01 15:29:50 +01002309 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
2310 cpu_reloading_memory_map();
2311 /* The CPU and TLB are protected by the iothread lock.
2312 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2313 * may have split the RCU critical section.
2314 */
2315 d = atomic_rcu_read(&cpuas->as->dispatch);
2316 cpuas->memory_dispatch = d;
2317 tlb_flush(cpuas->cpu, 1);
Avi Kivity50c1e142012-02-08 21:36:02 +02002318}
2319
Avi Kivityac1970f2012-10-03 16:22:53 +02002320void address_space_init_dispatch(AddressSpace *as)
2321{
Paolo Bonzini00752702013-05-29 12:13:54 +02002322 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002323 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002324 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02002325 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02002326 .region_add = mem_add,
2327 .region_nop = mem_add,
2328 .priority = 0,
2329 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002330 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02002331}
2332
Paolo Bonzini6e48e8f2015-02-10 10:25:44 -07002333void address_space_unregister(AddressSpace *as)
2334{
2335 memory_listener_unregister(&as->dispatch_listener);
2336}
2337
Avi Kivity83f3c252012-10-07 12:59:55 +02002338void address_space_destroy_dispatch(AddressSpace *as)
2339{
2340 AddressSpaceDispatch *d = as->dispatch;
2341
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002342 atomic_rcu_set(&as->dispatch, NULL);
2343 if (d) {
2344 call_rcu(d, address_space_dispatch_free, rcu);
2345 }
Avi Kivity83f3c252012-10-07 12:59:55 +02002346}
2347
Avi Kivity62152b82011-07-26 14:26:14 +03002348static void memory_map_init(void)
2349{
Anthony Liguori7267c092011-08-20 22:09:37 -05002350 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01002351
Paolo Bonzini57271d62013-11-07 17:14:37 +01002352 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002353 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03002354
Anthony Liguori7267c092011-08-20 22:09:37 -05002355 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02002356 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2357 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002358 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity62152b82011-07-26 14:26:14 +03002359}
2360
2361MemoryRegion *get_system_memory(void)
2362{
2363 return system_memory;
2364}
2365
Avi Kivity309cb472011-08-08 16:09:03 +03002366MemoryRegion *get_system_io(void)
2367{
2368 return system_io;
2369}
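/*
 * Illustrative sketch (not part of the original file, kept under #if 0):
 * board code normally consumes the two accessors above by hanging RAM off
 * the system memory region.  The names below are hypothetical; real boards
 * live under hw/.
 */
#if 0
static void example_board_init_ram(MemoryRegion *ram, uint64_t ram_size)
{
    memory_region_allocate_system_memory(ram, NULL, "example.ram", ram_size);
    memory_region_add_subregion(get_system_memory(), 0, ram);
}
#endif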
2370
pbrooke2eef172008-06-08 01:09:01 +00002371#endif /* !defined(CONFIG_USER_ONLY) */
2372
bellard13eb76e2004-01-24 15:23:36 +00002373/* physical memory access (slow version, mainly for debug) */
2374#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02002375int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00002376 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002377{
2378 int l, flags;
2379 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00002380 void * p;
bellard13eb76e2004-01-24 15:23:36 +00002381
2382 while (len > 0) {
2383 page = addr & TARGET_PAGE_MASK;
2384 l = (page + TARGET_PAGE_SIZE) - addr;
2385 if (l > len)
2386 l = len;
2387 flags = page_get_flags(page);
2388 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00002389 return -1;
bellard13eb76e2004-01-24 15:23:36 +00002390 if (is_write) {
2391 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00002392 return -1;
bellard579a97f2007-11-11 14:26:47 +00002393 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002394 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00002395 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002396 memcpy(p, buf, l);
2397 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00002398 } else {
2399 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00002400 return -1;
bellard579a97f2007-11-11 14:26:47 +00002401 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002402 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00002403 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002404 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00002405 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00002406 }
2407 len -= l;
2408 buf += l;
2409 addr += l;
2410 }
Paul Brooka68fe892010-03-01 00:08:59 +00002411 return 0;
bellard13eb76e2004-01-24 15:23:36 +00002412}
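/*
 * Illustrative sketch (not part of the original file, kept under #if 0):
 * how a gdbstub-style caller might use the debug accessor above to peek at
 * a guest virtual address.  "example_peek_u32" is hypothetical.
 */
#if 0
static int example_peek_u32(CPUState *cpu, target_ulong guest_va, uint32_t *out)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(cpu, guest_va, buf, sizeof(buf), 0) < 0) {
        return -1;                  /* page invalid or not readable */
    }
    *out = ldl_p(buf);              /* bytes are in guest byte order */
    return 0;
}
#endif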
bellard8df1cd02005-01-28 22:37:22 +00002413
bellard13eb76e2004-01-24 15:23:36 +00002414#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002415
Paolo Bonzini845b6212015-03-23 11:45:53 +01002416static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002417 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002418{
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002419 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002420 addr += memory_region_get_ram_addr(mr);
2421
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002422 /* No early return if dirty_log_mask is or becomes 0, because
2423 * cpu_physical_memory_set_dirty_range will still call
2424 * xen_modified_memory.
2425 */
2426 if (dirty_log_mask) {
2427 dirty_log_mask =
2428 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002429 }
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002430 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2431 tb_invalidate_phys_range(addr, addr + length);
2432 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2433 }
2434 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002435}
2436
Richard Henderson23326162013-07-08 14:55:59 -07002437static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02002438{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02002439 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07002440
2441 /* Regions are assumed to support 1-4 byte accesses unless
2442 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07002443 if (access_size_max == 0) {
2444 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002445 }
Richard Henderson23326162013-07-08 14:55:59 -07002446
2447 /* Bound the maximum access by the alignment of the address. */
2448 if (!mr->ops->impl.unaligned) {
2449 unsigned align_size_max = addr & -addr;
2450 if (align_size_max != 0 && align_size_max < access_size_max) {
2451 access_size_max = align_size_max;
2452 }
2453 }
2454
2455 /* Don't attempt accesses larger than the maximum. */
2456 if (l > access_size_max) {
2457 l = access_size_max;
2458 }
Peter Maydell6554f5c2015-07-24 13:33:10 +01002459 l = pow2floor(l);
Richard Henderson23326162013-07-08 14:55:59 -07002460
2461 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002462}
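/*
 * Worked example (illustrative): for a region whose ops declare
 * valid.max_access_size = 4 and impl.unaligned = false, a request of
 * l = 8 bytes at addr = 0x1002 is first capped to 4 by the region maximum,
 * then to (0x1002 & -0x1002) == 2 by the alignment bound, and pow2floor()
 * leaves 2; the caller therefore issues a 2-byte access and loops for the
 * remaining bytes.
 */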
2463
Jan Kiszka4840f102015-06-18 18:47:22 +02002464static bool prepare_mmio_access(MemoryRegion *mr)
Paolo Bonzini125b3802015-06-18 18:47:21 +02002465{
Jan Kiszka4840f102015-06-18 18:47:22 +02002466 bool unlocked = !qemu_mutex_iothread_locked();
2467 bool release_lock = false;
2468
2469 if (unlocked && mr->global_locking) {
2470 qemu_mutex_lock_iothread();
2471 unlocked = false;
2472 release_lock = true;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002473 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002474 if (mr->flush_coalesced_mmio) {
2475 if (unlocked) {
2476 qemu_mutex_lock_iothread();
2477 }
2478 qemu_flush_coalesced_mmio_buffer();
2479 if (unlocked) {
2480 qemu_mutex_unlock_iothread();
2481 }
2482 }
2483
2484 return release_lock;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002485}
2486
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002487/* Called within RCU critical section. */
2488static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
2489 MemTxAttrs attrs,
2490 const uint8_t *buf,
2491 int len, hwaddr addr1,
2492 hwaddr l, MemoryRegion *mr)
bellard13eb76e2004-01-24 15:23:36 +00002493{
bellard13eb76e2004-01-24 15:23:36 +00002494 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002495 uint64_t val;
Peter Maydell3b643492015-04-26 16:49:23 +01002496 MemTxResult result = MEMTX_OK;
Jan Kiszka4840f102015-06-18 18:47:22 +02002497 bool release_lock = false;
ths3b46e622007-09-17 08:09:54 +00002498
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002499 for (;;) {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002500 if (!memory_access_is_direct(mr, true)) {
2501 release_lock |= prepare_mmio_access(mr);
2502 l = memory_access_size(mr, l, addr1);
2503 /* XXX: could force current_cpu to NULL to avoid
2504 potential bugs */
2505 switch (l) {
2506 case 8:
2507 /* 64 bit write access */
2508 val = ldq_p(buf);
2509 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2510 attrs);
2511 break;
2512 case 4:
2513 /* 32 bit write access */
2514 val = ldl_p(buf);
2515 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2516 attrs);
2517 break;
2518 case 2:
2519 /* 16 bit write access */
2520 val = lduw_p(buf);
2521 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2522 attrs);
2523 break;
2524 case 1:
2525 /* 8 bit write access */
2526 val = ldub_p(buf);
2527 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2528 attrs);
2529 break;
2530 default:
2531 abort();
bellard13eb76e2004-01-24 15:23:36 +00002532 }
2533 } else {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002534 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002535 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002536 memcpy(ptr, buf, l);
2537 invalidate_and_set_dirty(mr, addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002538 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002539
2540 if (release_lock) {
2541 qemu_mutex_unlock_iothread();
2542 release_lock = false;
2543 }
2544
bellard13eb76e2004-01-24 15:23:36 +00002545 len -= l;
2546 buf += l;
2547 addr += l;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002548
2549 if (!len) {
2550 break;
2551 }
2552
2553 l = len;
2554 mr = address_space_translate(as, addr, &addr1, &l, true);
bellard13eb76e2004-01-24 15:23:36 +00002555 }
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002556
Peter Maydell3b643492015-04-26 16:49:23 +01002557 return result;
bellard13eb76e2004-01-24 15:23:36 +00002558}
bellard8df1cd02005-01-28 22:37:22 +00002559
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002560MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2561 const uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002562{
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002563 hwaddr l;
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002564 hwaddr addr1;
2565 MemoryRegion *mr;
2566 MemTxResult result = MEMTX_OK;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002567
2568 if (len > 0) {
2569 rcu_read_lock();
2570 l = len;
2571 mr = address_space_translate(as, addr, &addr1, &l, true);
2572 result = address_space_write_continue(as, addr, attrs, buf, len,
2573 addr1, l, mr);
2574 rcu_read_unlock();
2575 }
2576
2577 return result;
2578}
2579
2580/* Called within RCU critical section. */
2581MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
2582 MemTxAttrs attrs, uint8_t *buf,
2583 int len, hwaddr addr1, hwaddr l,
2584 MemoryRegion *mr)
2585{
2586 uint8_t *ptr;
2587 uint64_t val;
2588 MemTxResult result = MEMTX_OK;
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002589 bool release_lock = false;
2590
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002591 for (;;) {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002592 if (!memory_access_is_direct(mr, false)) {
2593 /* I/O case */
2594 release_lock |= prepare_mmio_access(mr);
2595 l = memory_access_size(mr, l, addr1);
2596 switch (l) {
2597 case 8:
2598 /* 64 bit read access */
2599 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2600 attrs);
2601 stq_p(buf, val);
2602 break;
2603 case 4:
2604 /* 32 bit read access */
2605 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2606 attrs);
2607 stl_p(buf, val);
2608 break;
2609 case 2:
2610 /* 16 bit read access */
2611 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2612 attrs);
2613 stw_p(buf, val);
2614 break;
2615 case 1:
2616 /* 8 bit read access */
2617 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2618 attrs);
2619 stb_p(buf, val);
2620 break;
2621 default:
2622 abort();
2623 }
2624 } else {
2625 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002626 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002627 memcpy(buf, ptr, l);
2628 }
2629
2630 if (release_lock) {
2631 qemu_mutex_unlock_iothread();
2632 release_lock = false;
2633 }
2634
2635 len -= l;
2636 buf += l;
2637 addr += l;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002638
2639 if (!len) {
2640 break;
2641 }
2642
2643 l = len;
2644 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002645 }
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002646
2647 return result;
2648}
2649
Paolo Bonzini3cc8f882015-12-09 10:34:13 +01002650MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
2651 MemTxAttrs attrs, uint8_t *buf, int len)
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002652{
2653 hwaddr l;
2654 hwaddr addr1;
2655 MemoryRegion *mr;
2656 MemTxResult result = MEMTX_OK;
2657
2658 if (len > 0) {
2659 rcu_read_lock();
2660 l = len;
2661 mr = address_space_translate(as, addr, &addr1, &l, false);
2662 result = address_space_read_continue(as, addr, attrs, buf, len,
2663 addr1, l, mr);
2664 rcu_read_unlock();
2665 }
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002666
2667 return result;
Avi Kivityac1970f2012-10-03 16:22:53 +02002668}
2669
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002670MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2671 uint8_t *buf, int len, bool is_write)
2672{
2673 if (is_write) {
2674 return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
2675 } else {
2676 return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
2677 }
2678}
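/*
 * Illustrative sketch (not part of the original file, kept under #if 0):
 * a device-model-style caller pushing a buffer into guest memory and
 * checking the transaction result.  "example_write_blob" is hypothetical.
 */
#if 0
static bool example_write_blob(AddressSpace *as, hwaddr gpa,
                               const uint8_t *data, int size)
{
    MemTxResult r = address_space_write(as, gpa, MEMTXATTRS_UNSPECIFIED,
                                        data, size);

    return r == MEMTX_OK;           /* MEMTX_ERROR / MEMTX_DECODE_ERROR otherwise */
}
#endif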
Avi Kivityac1970f2012-10-03 16:22:53 +02002679
Avi Kivitya8170e52012-10-23 12:30:10 +02002680void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002681 int len, int is_write)
2682{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002683 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2684 buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002685}
2686
Alexander Graf582b55a2013-12-11 14:17:44 +01002687enum write_rom_type {
2688 WRITE_DATA,
2689 FLUSH_CACHE,
2690};
2691
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002692static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
Alexander Graf582b55a2013-12-11 14:17:44 +01002693 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
bellardd0ecd2a2006-04-23 17:14:48 +00002694{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002695 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002696 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002697 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002698 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002699
Paolo Bonzini41063e12015-03-18 14:21:43 +01002700 rcu_read_lock();
bellardd0ecd2a2006-04-23 17:14:48 +00002701 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002702 l = len;
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002703 mr = address_space_translate(as, addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002704
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002705 if (!(memory_region_is_ram(mr) ||
2706 memory_region_is_romd(mr))) {
Paolo Bonzinib242e0e2015-07-04 00:24:51 +02002707 l = memory_access_size(mr, l, addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002708 } else {
bellardd0ecd2a2006-04-23 17:14:48 +00002709 /* ROM/RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002710 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf582b55a2013-12-11 14:17:44 +01002711 switch (type) {
2712 case WRITE_DATA:
2713 memcpy(ptr, buf, l);
Paolo Bonzini845b6212015-03-23 11:45:53 +01002714 invalidate_and_set_dirty(mr, addr1, l);
Alexander Graf582b55a2013-12-11 14:17:44 +01002715 break;
2716 case FLUSH_CACHE:
2717 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2718 break;
2719 }
bellardd0ecd2a2006-04-23 17:14:48 +00002720 }
2721 len -= l;
2722 buf += l;
2723 addr += l;
2724 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002725 rcu_read_unlock();
bellardd0ecd2a2006-04-23 17:14:48 +00002726}
2727
Alexander Graf582b55a2013-12-11 14:17:44 +01002728/* used for ROM loading: can write in RAM and ROM */
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002729void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
Alexander Graf582b55a2013-12-11 14:17:44 +01002730 const uint8_t *buf, int len)
2731{
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002732 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
Alexander Graf582b55a2013-12-11 14:17:44 +01002733}
2734
2735void cpu_flush_icache_range(hwaddr start, int len)
2736{
2737 /*
2738 * This function should do the same thing as an icache flush that was
2739 * triggered from within the guest. For TCG we are always cache coherent,
2740 * so there is no need to flush anything. For KVM / Xen we need to flush
2741 * the host's instruction cache at least.
2742 */
2743 if (tcg_enabled()) {
2744 return;
2745 }
2746
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002747 cpu_physical_memory_write_rom_internal(&address_space_memory,
2748 start, NULL, len, FLUSH_CACHE);
Alexander Graf582b55a2013-12-11 14:17:44 +01002749}
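/*
 * Illustrative sketch (not part of the original file, kept under #if 0):
 * a loader copying a code blob into guest memory and flushing the host
 * instruction cache afterwards, which matters for KVM/Xen per the comment
 * above.  The names are hypothetical.
 */
#if 0
static void example_load_code(hwaddr load_addr, const uint8_t *blob, int blob_len)
{
    cpu_physical_memory_write_rom(&address_space_memory, load_addr,
                                  blob, blob_len);
    cpu_flush_icache_range(load_addr, blob_len);
}
#endif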
2750
aliguori6d16c2f2009-01-22 16:59:11 +00002751typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002752 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002753 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002754 hwaddr addr;
2755 hwaddr len;
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002756 bool in_use;
aliguori6d16c2f2009-01-22 16:59:11 +00002757} BounceBuffer;
2758
2759static BounceBuffer bounce;
2760
aliguoriba223c22009-01-22 16:59:16 +00002761typedef struct MapClient {
Fam Zhenge95205e2015-03-16 17:03:37 +08002762 QEMUBH *bh;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002763 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002764} MapClient;
2765
Fam Zheng38e047b2015-03-16 17:03:35 +08002766QemuMutex map_client_list_lock;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002767static QLIST_HEAD(map_client_list, MapClient) map_client_list
2768 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002769
Fam Zhenge95205e2015-03-16 17:03:37 +08002770static void cpu_unregister_map_client_do(MapClient *client)
aliguoriba223c22009-01-22 16:59:16 +00002771{
Blue Swirl72cf2d42009-09-12 07:36:22 +00002772 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002773 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002774}
2775
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002776static void cpu_notify_map_clients_locked(void)
aliguoriba223c22009-01-22 16:59:16 +00002777{
2778 MapClient *client;
2779
Blue Swirl72cf2d42009-09-12 07:36:22 +00002780 while (!QLIST_EMPTY(&map_client_list)) {
2781 client = QLIST_FIRST(&map_client_list);
Fam Zhenge95205e2015-03-16 17:03:37 +08002782 qemu_bh_schedule(client->bh);
2783 cpu_unregister_map_client_do(client);
aliguoriba223c22009-01-22 16:59:16 +00002784 }
2785}
2786
Fam Zhenge95205e2015-03-16 17:03:37 +08002787void cpu_register_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002788{
2789 MapClient *client = g_malloc(sizeof(*client));
2790
Fam Zheng38e047b2015-03-16 17:03:35 +08002791 qemu_mutex_lock(&map_client_list_lock);
Fam Zhenge95205e2015-03-16 17:03:37 +08002792 client->bh = bh;
bellardd0ecd2a2006-04-23 17:14:48 +00002793 QLIST_INSERT_HEAD(&map_client_list, client, link);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002794 if (!atomic_read(&bounce.in_use)) {
2795 cpu_notify_map_clients_locked();
2796 }
Fam Zheng38e047b2015-03-16 17:03:35 +08002797 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002798}
2799
Fam Zheng38e047b2015-03-16 17:03:35 +08002800void cpu_exec_init_all(void)
2801{
2802 qemu_mutex_init(&ram_list.mutex);
Fam Zheng38e047b2015-03-16 17:03:35 +08002803 io_mem_init();
Paolo Bonzini680a4782015-11-02 09:23:52 +01002804 memory_map_init();
Fam Zheng38e047b2015-03-16 17:03:35 +08002805 qemu_mutex_init(&map_client_list_lock);
2806}
2807
Fam Zhenge95205e2015-03-16 17:03:37 +08002808void cpu_unregister_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002809{
Fam Zhenge95205e2015-03-16 17:03:37 +08002810 MapClient *client;
bellardd0ecd2a2006-04-23 17:14:48 +00002811
Fam Zhenge95205e2015-03-16 17:03:37 +08002812 qemu_mutex_lock(&map_client_list_lock);
2813 QLIST_FOREACH(client, &map_client_list, link) {
2814 if (client->bh == bh) {
2815 cpu_unregister_map_client_do(client);
2816 break;
2817 }
2818 }
2819 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002820}
2821
2822static void cpu_notify_map_clients(void)
2823{
Fam Zheng38e047b2015-03-16 17:03:35 +08002824 qemu_mutex_lock(&map_client_list_lock);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002825 cpu_notify_map_clients_locked();
Fam Zheng38e047b2015-03-16 17:03:35 +08002826 qemu_mutex_unlock(&map_client_list_lock);
aliguori6d16c2f2009-01-22 16:59:11 +00002827}
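/*
 * Illustrative sketch (not part of the original file, kept under #if 0):
 * how a DMA helper can arrange to retry once address_space_map() resources
 * become available again.  The callback and opaque are hypothetical; the
 * real users live in dma-helpers.c.
 */
#if 0
static void example_dma_retry(void *opaque)
{
    /* hypothetical: re-issue the address_space_map() that failed earlier */
}

static void example_defer_mapping(void *opaque)
{
    QEMUBH *bh = qemu_bh_new(example_dma_retry, opaque);

    cpu_register_map_client(bh);    /* BH is scheduled once a retry may succeed */
}
#endif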
2828
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002829bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2830{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002831 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002832 hwaddr l, xlat;
2833
Paolo Bonzini41063e12015-03-18 14:21:43 +01002834 rcu_read_lock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002835 while (len > 0) {
2836 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002837 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2838 if (!memory_access_is_direct(mr, is_write)) {
2839 l = memory_access_size(mr, l, addr);
2840 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002841 return false;
2842 }
2843 }
2844
2845 len -= l;
2846 addr += l;
2847 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002848 rcu_read_unlock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002849 return true;
2850}
2851
aliguori6d16c2f2009-01-22 16:59:11 +00002852/* Map a physical memory region into a host virtual address.
2853 * May map a subset of the requested range, given by and returned in *plen.
2854 * May return NULL if resources needed to perform the mapping are exhausted.
2855 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002856 * Use cpu_register_map_client() to know when retrying the map operation is
2857 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002858 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002859void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002860 hwaddr addr,
2861 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002862 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002863{
Avi Kivitya8170e52012-10-23 12:30:10 +02002864 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002865 hwaddr done = 0;
2866 hwaddr l, xlat, base;
2867 MemoryRegion *mr, *this_mr;
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01002868 void *ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00002869
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002870 if (len == 0) {
2871 return NULL;
2872 }
aliguori6d16c2f2009-01-22 16:59:11 +00002873
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002874 l = len;
Paolo Bonzini41063e12015-03-18 14:21:43 +01002875 rcu_read_lock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002876 mr = address_space_translate(as, addr, &xlat, &l, is_write);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002877
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002878 if (!memory_access_is_direct(mr, is_write)) {
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002879 if (atomic_xchg(&bounce.in_use, true)) {
Paolo Bonzini41063e12015-03-18 14:21:43 +01002880 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002881 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002882 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002883 /* Avoid unbounded allocations */
2884 l = MIN(l, TARGET_PAGE_SIZE);
2885 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002886 bounce.addr = addr;
2887 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002888
2889 memory_region_ref(mr);
2890 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002891 if (!is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002892 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2893 bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002894 }
aliguori6d16c2f2009-01-22 16:59:11 +00002895
Paolo Bonzini41063e12015-03-18 14:21:43 +01002896 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002897 *plen = l;
2898 return bounce.buffer;
2899 }
2900
2901 base = xlat;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002902
2903 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00002904 len -= l;
2905 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002906 done += l;
2907 if (len == 0) {
2908 break;
2909 }
2910
2911 l = len;
2912 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2913 if (this_mr != mr || xlat != base + done) {
2914 break;
2915 }
aliguori6d16c2f2009-01-22 16:59:11 +00002916 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002917
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002918 memory_region_ref(mr);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002919 *plen = done;
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002920 ptr = qemu_ram_ptr_length(mr->ram_block, base, plen);
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01002921 rcu_read_unlock();
2922
2923 return ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00002924}
2925
Avi Kivityac1970f2012-10-03 16:22:53 +02002926/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002927 * Will also mark the memory as dirty if is_write == 1. access_len gives
2928 * the amount of memory that was actually read or written by the caller.
2929 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002930void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2931 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002932{
2933 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002934 MemoryRegion *mr;
2935 ram_addr_t addr1;
2936
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01002937 mr = memory_region_from_host(buffer, &addr1);
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002938 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00002939 if (is_write) {
Paolo Bonzini845b6212015-03-23 11:45:53 +01002940 invalidate_and_set_dirty(mr, addr1, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002941 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002942 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002943 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002944 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002945 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00002946 return;
2947 }
2948 if (is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002949 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
2950 bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002951 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002952 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002953 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002954 memory_region_unref(bounce.mr);
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002955 atomic_mb_set(&bounce.in_use, false);
aliguoriba223c22009-01-22 16:59:16 +00002956 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002957}
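/*
 * Illustrative sketch (not part of the original file, kept under #if 0):
 * the usual map / access / unmap pattern for zero-copy access to guest
 * memory.  "example_zero_fill" is hypothetical.
 */
#if 0
static bool example_zero_fill(AddressSpace *as, hwaddr gpa, hwaddr size)
{
    hwaddr mapped = size;
    void *host = address_space_map(as, gpa, &mapped, true);

    if (!host) {
        return false;               /* bounce buffer in use, retry later */
    }
    /* Only "mapped" bytes are valid; it may be less than what was asked for. */
    memset(host, 0, mapped);
    address_space_unmap(as, host, mapped, true, mapped);
    return mapped == size;
}
#endif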
bellardd0ecd2a2006-04-23 17:14:48 +00002958
Avi Kivitya8170e52012-10-23 12:30:10 +02002959void *cpu_physical_memory_map(hwaddr addr,
2960 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002961 int is_write)
2962{
2963 return address_space_map(&address_space_memory, addr, plen, is_write);
2964}
2965
Avi Kivitya8170e52012-10-23 12:30:10 +02002966void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2967 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002968{
2969 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2970}
2971
bellard8df1cd02005-01-28 22:37:22 +00002972/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01002973static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
2974 MemTxAttrs attrs,
2975 MemTxResult *result,
2976 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002977{
bellard8df1cd02005-01-28 22:37:22 +00002978 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002979 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002980 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002981 hwaddr l = 4;
2982 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01002983 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02002984 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00002985
Paolo Bonzini41063e12015-03-18 14:21:43 +01002986 rcu_read_lock();
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002987 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002988 if (l < 4 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02002989 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02002990
bellard8df1cd02005-01-28 22:37:22 +00002991 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01002992 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002993#if defined(TARGET_WORDS_BIGENDIAN)
2994 if (endian == DEVICE_LITTLE_ENDIAN) {
2995 val = bswap32(val);
2996 }
2997#else
2998 if (endian == DEVICE_BIG_ENDIAN) {
2999 val = bswap32(val);
3000 }
3001#endif
bellard8df1cd02005-01-28 22:37:22 +00003002 } else {
3003 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003004 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003005 switch (endian) {
3006 case DEVICE_LITTLE_ENDIAN:
3007 val = ldl_le_p(ptr);
3008 break;
3009 case DEVICE_BIG_ENDIAN:
3010 val = ldl_be_p(ptr);
3011 break;
3012 default:
3013 val = ldl_p(ptr);
3014 break;
3015 }
Peter Maydell50013112015-04-26 16:49:24 +01003016 r = MEMTX_OK;
3017 }
3018 if (result) {
3019 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003020 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003021 if (release_lock) {
3022 qemu_mutex_unlock_iothread();
3023 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003024 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003025 return val;
3026}
3027
Peter Maydell50013112015-04-26 16:49:24 +01003028uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
3029 MemTxAttrs attrs, MemTxResult *result)
3030{
3031 return address_space_ldl_internal(as, addr, attrs, result,
3032 DEVICE_NATIVE_ENDIAN);
3033}
3034
3035uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
3036 MemTxAttrs attrs, MemTxResult *result)
3037{
3038 return address_space_ldl_internal(as, addr, attrs, result,
3039 DEVICE_LITTLE_ENDIAN);
3040}
3041
3042uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
3043 MemTxAttrs attrs, MemTxResult *result)
3044{
3045 return address_space_ldl_internal(as, addr, attrs, result,
3046 DEVICE_BIG_ENDIAN);
3047}
3048
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003049uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003050{
Peter Maydell50013112015-04-26 16:49:24 +01003051 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003052}
3053
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003054uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003055{
Peter Maydell50013112015-04-26 16:49:24 +01003056 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003057}
3058
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003059uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003060{
Peter Maydell50013112015-04-26 16:49:24 +01003061 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003062}
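/*
 * Illustrative sketch (not part of the original file, kept under #if 0):
 * reading a 32-bit little-endian value and turning a failed transaction
 * into an all-ones result, as a caller emulating a floating bus might.
 * The names are hypothetical.
 */
#if 0
static uint32_t example_read_reg32(AddressSpace *as, hwaddr reg_gpa)
{
    MemTxResult res;
    uint32_t val = address_space_ldl_le(as, reg_gpa, MEMTXATTRS_UNSPECIFIED, &res);

    return (res == MEMTX_OK) ? val : 0xffffffff;
}
#endif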
3063
bellard84b7b8e2005-11-28 21:19:04 +00003064/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003065static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
3066 MemTxAttrs attrs,
3067 MemTxResult *result,
3068 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00003069{
bellard84b7b8e2005-11-28 21:19:04 +00003070 uint8_t *ptr;
3071 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003072 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003073 hwaddr l = 8;
3074 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003075 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003076 bool release_lock = false;
bellard84b7b8e2005-11-28 21:19:04 +00003077
Paolo Bonzini41063e12015-03-18 14:21:43 +01003078 rcu_read_lock();
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003079 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003080 false);
3081 if (l < 8 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003082 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003083
bellard84b7b8e2005-11-28 21:19:04 +00003084 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003085 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
Paolo Bonzini968a5622013-05-24 17:58:37 +02003086#if defined(TARGET_WORDS_BIGENDIAN)
3087 if (endian == DEVICE_LITTLE_ENDIAN) {
3088 val = bswap64(val);
3089 }
3090#else
3091 if (endian == DEVICE_BIG_ENDIAN) {
3092 val = bswap64(val);
3093 }
3094#endif
bellard84b7b8e2005-11-28 21:19:04 +00003095 } else {
3096 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003097 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003098 switch (endian) {
3099 case DEVICE_LITTLE_ENDIAN:
3100 val = ldq_le_p(ptr);
3101 break;
3102 case DEVICE_BIG_ENDIAN:
3103 val = ldq_be_p(ptr);
3104 break;
3105 default:
3106 val = ldq_p(ptr);
3107 break;
3108 }
Peter Maydell50013112015-04-26 16:49:24 +01003109 r = MEMTX_OK;
3110 }
3111 if (result) {
3112 *result = r;
bellard84b7b8e2005-11-28 21:19:04 +00003113 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003114 if (release_lock) {
3115 qemu_mutex_unlock_iothread();
3116 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003117 rcu_read_unlock();
bellard84b7b8e2005-11-28 21:19:04 +00003118 return val;
3119}
3120
Peter Maydell50013112015-04-26 16:49:24 +01003121uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
3122 MemTxAttrs attrs, MemTxResult *result)
3123{
3124 return address_space_ldq_internal(as, addr, attrs, result,
3125 DEVICE_NATIVE_ENDIAN);
3126}
3127
3128uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
3129 MemTxAttrs attrs, MemTxResult *result)
3130{
3131 return address_space_ldq_internal(as, addr, attrs, result,
3132 DEVICE_LITTLE_ENDIAN);
3133}
3134
3135uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
3136 MemTxAttrs attrs, MemTxResult *result)
3137{
3138 return address_space_ldq_internal(as, addr, attrs, result,
3139 DEVICE_BIG_ENDIAN);
3140}
3141
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003142uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003143{
Peter Maydell50013112015-04-26 16:49:24 +01003144 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003145}
3146
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003147uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003148{
Peter Maydell50013112015-04-26 16:49:24 +01003149 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003150}
3151
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003152uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003153{
Peter Maydell50013112015-04-26 16:49:24 +01003154 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003155}
3156
bellardaab33092005-10-30 20:48:42 +00003157/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003158uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
3159 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003160{
3161 uint8_t val;
Peter Maydell50013112015-04-26 16:49:24 +01003162 MemTxResult r;
3163
3164 r = address_space_rw(as, addr, attrs, &val, 1, 0);
3165 if (result) {
3166 *result = r;
3167 }
bellardaab33092005-10-30 20:48:42 +00003168 return val;
3169}
3170
Peter Maydell50013112015-04-26 16:49:24 +01003171uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
3172{
3173 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3174}
3175
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003176/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003177static inline uint32_t address_space_lduw_internal(AddressSpace *as,
3178 hwaddr addr,
3179 MemTxAttrs attrs,
3180 MemTxResult *result,
3181 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003182{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003183 uint8_t *ptr;
3184 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003185 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003186 hwaddr l = 2;
3187 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003188 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003189 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003190
Paolo Bonzini41063e12015-03-18 14:21:43 +01003191 rcu_read_lock();
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003192 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003193 false);
3194 if (l < 2 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003195 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003196
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003197 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003198 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003199#if defined(TARGET_WORDS_BIGENDIAN)
3200 if (endian == DEVICE_LITTLE_ENDIAN) {
3201 val = bswap16(val);
3202 }
3203#else
3204 if (endian == DEVICE_BIG_ENDIAN) {
3205 val = bswap16(val);
3206 }
3207#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003208 } else {
3209 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003210 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003211 switch (endian) {
3212 case DEVICE_LITTLE_ENDIAN:
3213 val = lduw_le_p(ptr);
3214 break;
3215 case DEVICE_BIG_ENDIAN:
3216 val = lduw_be_p(ptr);
3217 break;
3218 default:
3219 val = lduw_p(ptr);
3220 break;
3221 }
Peter Maydell50013112015-04-26 16:49:24 +01003222 r = MEMTX_OK;
3223 }
3224 if (result) {
3225 *result = r;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003226 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003227 if (release_lock) {
3228 qemu_mutex_unlock_iothread();
3229 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003230 rcu_read_unlock();
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003231 return val;
bellardaab33092005-10-30 20:48:42 +00003232}
3233
Peter Maydell50013112015-04-26 16:49:24 +01003234uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
3235 MemTxAttrs attrs, MemTxResult *result)
3236{
3237 return address_space_lduw_internal(as, addr, attrs, result,
3238 DEVICE_NATIVE_ENDIAN);
3239}
3240
3241uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
3242 MemTxAttrs attrs, MemTxResult *result)
3243{
3244 return address_space_lduw_internal(as, addr, attrs, result,
3245 DEVICE_LITTLE_ENDIAN);
3246}
3247
3248uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
3249 MemTxAttrs attrs, MemTxResult *result)
3250{
3251 return address_space_lduw_internal(as, addr, attrs, result,
3252 DEVICE_BIG_ENDIAN);
3253}
3254
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003255uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003256{
Peter Maydell50013112015-04-26 16:49:24 +01003257 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003258}
3259
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003260uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003261{
Peter Maydell50013112015-04-26 16:49:24 +01003262 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003263}
3264
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003265uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003266{
Peter Maydell50013112015-04-26 16:49:24 +01003267 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003268}
3269
bellard8df1cd02005-01-28 22:37:22 +00003270/* warning: addr must be aligned. The RAM page is not marked as dirty
3271 and the code inside is not invalidated. It is useful if the dirty
3272 bits are used to track modified PTEs */
Peter Maydell50013112015-04-26 16:49:24 +01003273void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3274 MemTxAttrs attrs, MemTxResult *result)
bellard8df1cd02005-01-28 22:37:22 +00003275{
bellard8df1cd02005-01-28 22:37:22 +00003276 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003277 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003278 hwaddr l = 4;
3279 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003280 MemTxResult r;
Paolo Bonzini845b6212015-03-23 11:45:53 +01003281 uint8_t dirty_log_mask;
Jan Kiszka4840f102015-06-18 18:47:22 +02003282 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003283
Paolo Bonzini41063e12015-03-18 14:21:43 +01003284 rcu_read_lock();
Edgar E. Iglesias2198a122013-11-28 10:13:41 +01003285 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003286 true);
3287 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003288 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003289
Peter Maydell50013112015-04-26 16:49:24 +01003290 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003291 } else {
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003292 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
bellard8df1cd02005-01-28 22:37:22 +00003293 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003294
Paolo Bonzini845b6212015-03-23 11:45:53 +01003295 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3296 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003297 cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
3298 4, dirty_log_mask);
Peter Maydell50013112015-04-26 16:49:24 +01003299 r = MEMTX_OK;
3300 }
3301 if (result) {
3302 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003303 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003304 if (release_lock) {
3305 qemu_mutex_unlock_iothread();
3306 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003307 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003308}
3309
Peter Maydell50013112015-04-26 16:49:24 +01003310void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3311{
3312 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3313}
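/*
 * Illustrative sketch (not part of the original file, kept under #if 0):
 * the typical caller of the "notdirty" variant is a target MMU walker
 * setting accessed/dirty bits in a guest PTE, where invalidating translated
 * code for that page would be wasted work.  The PTE layout and bit value
 * below are hypothetical.
 */
#if 0
static void example_set_pte_accessed(AddressSpace *as, hwaddr pte_addr, uint32_t pte)
{
    stl_phys_notdirty(as, pte_addr, pte | 0x20 /* hypothetical "accessed" bit */);
}
#endif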
3314
bellard8df1cd02005-01-28 22:37:22 +00003315/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003316static inline void address_space_stl_internal(AddressSpace *as,
3317 hwaddr addr, uint32_t val,
3318 MemTxAttrs attrs,
3319 MemTxResult *result,
3320 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003321{
bellard8df1cd02005-01-28 22:37:22 +00003322 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003323 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003324 hwaddr l = 4;
3325 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003326 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003327 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003328
Paolo Bonzini41063e12015-03-18 14:21:43 +01003329 rcu_read_lock();
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003330 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003331 true);
3332 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003333 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003334
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003335#if defined(TARGET_WORDS_BIGENDIAN)
3336 if (endian == DEVICE_LITTLE_ENDIAN) {
3337 val = bswap32(val);
3338 }
3339#else
3340 if (endian == DEVICE_BIG_ENDIAN) {
3341 val = bswap32(val);
3342 }
3343#endif
Peter Maydell50013112015-04-26 16:49:24 +01003344 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003345 } else {
bellard8df1cd02005-01-28 22:37:22 +00003346 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003347 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003348 switch (endian) {
3349 case DEVICE_LITTLE_ENDIAN:
3350 stl_le_p(ptr, val);
3351 break;
3352 case DEVICE_BIG_ENDIAN:
3353 stl_be_p(ptr, val);
3354 break;
3355 default:
3356 stl_p(ptr, val);
3357 break;
3358 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003359 invalidate_and_set_dirty(mr, addr1, 4);
Peter Maydell50013112015-04-26 16:49:24 +01003360 r = MEMTX_OK;
bellard8df1cd02005-01-28 22:37:22 +00003361 }
Peter Maydell50013112015-04-26 16:49:24 +01003362 if (result) {
3363 *result = r;
3364 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003365 if (release_lock) {
3366 qemu_mutex_unlock_iothread();
3367 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003368 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003369}
3370
3371void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3372 MemTxAttrs attrs, MemTxResult *result)
3373{
3374 address_space_stl_internal(as, addr, val, attrs, result,
3375 DEVICE_NATIVE_ENDIAN);
3376}
3377
3378void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3379 MemTxAttrs attrs, MemTxResult *result)
3380{
3381 address_space_stl_internal(as, addr, val, attrs, result,
3382 DEVICE_LITTLE_ENDIAN);
3383}
3384
3385void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3386 MemTxAttrs attrs, MemTxResult *result)
3387{
3388 address_space_stl_internal(as, addr, val, attrs, result,
3389 DEVICE_BIG_ENDIAN);
bellard8df1cd02005-01-28 22:37:22 +00003390}
3391
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003392void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003393{
Peter Maydell50013112015-04-26 16:49:24 +01003394 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003395}
3396
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003397void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003398{
Peter Maydell50013112015-04-26 16:49:24 +01003399 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003400}
3401
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003402void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003403{
Peter Maydell50013112015-04-26 16:49:24 +01003404 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003405}
3406
bellardaab33092005-10-30 20:48:42 +00003407/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003408void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3409 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003410{
3411 uint8_t v = val;
Peter Maydell50013112015-04-26 16:49:24 +01003412 MemTxResult r;
3413
3414 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3415 if (result) {
3416 *result = r;
3417 }
3418}
3419
3420void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3421{
3422 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003423}
3424
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003425/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003426static inline void address_space_stw_internal(AddressSpace *as,
3427 hwaddr addr, uint32_t val,
3428 MemTxAttrs attrs,
3429 MemTxResult *result,
3430 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003431{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003432 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003433 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003434 hwaddr l = 2;
3435 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003436 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003437 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003438
Paolo Bonzini41063e12015-03-18 14:21:43 +01003439 rcu_read_lock();
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003440 mr = address_space_translate(as, addr, &addr1, &l, true);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003441 if (l < 2 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003442 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003443
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003444#if defined(TARGET_WORDS_BIGENDIAN)
3445 if (endian == DEVICE_LITTLE_ENDIAN) {
3446 val = bswap16(val);
3447 }
3448#else
3449 if (endian == DEVICE_BIG_ENDIAN) {
3450 val = bswap16(val);
3451 }
3452#endif
Peter Maydell50013112015-04-26 16:49:24 +01003453 r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003454 } else {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003455 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003456 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003457 switch (endian) {
3458 case DEVICE_LITTLE_ENDIAN:
3459 stw_le_p(ptr, val);
3460 break;
3461 case DEVICE_BIG_ENDIAN:
3462 stw_be_p(ptr, val);
3463 break;
3464 default:
3465 stw_p(ptr, val);
3466 break;
3467 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003468 invalidate_and_set_dirty(mr, addr1, 2);
Peter Maydell50013112015-04-26 16:49:24 +01003469 r = MEMTX_OK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003470 }
Peter Maydell50013112015-04-26 16:49:24 +01003471 if (result) {
3472 *result = r;
3473 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003474 if (release_lock) {
3475 qemu_mutex_unlock_iothread();
3476 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003477 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003478}
3479
3480void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
3481 MemTxAttrs attrs, MemTxResult *result)
3482{
3483 address_space_stw_internal(as, addr, val, attrs, result,
3484 DEVICE_NATIVE_ENDIAN);
3485}
3486
3487void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
3488 MemTxAttrs attrs, MemTxResult *result)
3489{
3490 address_space_stw_internal(as, addr, val, attrs, result,
3491 DEVICE_LITTLE_ENDIAN);
3492}
3493
3494void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
3495 MemTxAttrs attrs, MemTxResult *result)
3496{
3497 address_space_stw_internal(as, addr, val, attrs, result,
3498 DEVICE_BIG_ENDIAN);
bellardaab33092005-10-30 20:48:42 +00003499}
3500
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003501void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003502{
Peter Maydell50013112015-04-26 16:49:24 +01003503 address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003504}
3505
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003506void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003507{
Peter Maydell50013112015-04-26 16:49:24 +01003508 address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003509}
3510
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003511void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003512{
Peter Maydell50013112015-04-26 16:49:24 +01003513 address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003514}
3515
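/*
 * Illustrative sketch only (not part of the original file): a caller that
 * needs to know whether a 16-bit store actually completed can pass a
 * MemTxResult pointer to address_space_stw_le() instead of using the
 * stw_*_phys wrappers above, which discard the result.
 */
static inline bool example_checked_stw_le(AddressSpace *as, hwaddr addr,
                                          uint16_t val)
{
    MemTxResult res;

    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, &res);
    return res == MEMTX_OK;
}
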
bellardaab33092005-10-30 20:48:42 +00003516/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003517void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
3518 MemTxAttrs attrs, MemTxResult *result)
3519{
3520 MemTxResult r;
3521 val = tswap64(val);
3522 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3523 if (result) {
3524 *result = r;
3525 }
3526}
3527
3528void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
3529 MemTxAttrs attrs, MemTxResult *result)
3530{
3531 MemTxResult r;
3532 val = cpu_to_le64(val);
3533 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3534 if (result) {
3535 *result = r;
3536 }
3537}

3538void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
3539 MemTxAttrs attrs, MemTxResult *result)
3540{
3541 MemTxResult r;
3542 val = cpu_to_be64(val);
3543 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3544 if (result) {
3545 *result = r;
3546 }
3547}
3548
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003549void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00003550{
Peter Maydell50013112015-04-26 16:49:24 +01003551 address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003552}
3553
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003554void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003555{
Peter Maydell50013112015-04-26 16:49:24 +01003556 address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003557}
3558
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003559void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003560{
Peter Maydell50013112015-04-26 16:49:24 +01003561 address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003562}
3563
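/*
 * Illustrative sketch only: the *_phys convenience wrappers above pass
 * MEMTXATTRS_UNSPECIFIED and ignore the transaction result, which suits
 * callers that have no way to report a failed bus access.  The two-field
 * descriptor layout below is hypothetical.
 */
static inline void example_write_desc(AddressSpace *as, hwaddr desc_base,
                                      uint64_t buf_addr, uint16_t buf_len)
{
    stq_le_phys(as, desc_base, buf_addr);        /* 64-bit address field */
    stw_le_phys(as, desc_base + 8, buf_len);     /* 16-bit length field */
}
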
aliguori5e2972f2009-03-28 17:51:36 +00003564/* virtual memory access for debug (includes writing to ROM) */
Andreas Färberf17ec442013-06-29 19:40:58 +02003565int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00003566 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003567{
3568 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02003569 hwaddr phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00003570 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00003571
3572 while (len > 0) {
Peter Maydell5232e4c2016-01-21 14:15:06 +00003573 int asidx;
3574 MemTxAttrs attrs;
3575
bellard13eb76e2004-01-24 15:23:36 +00003576 page = addr & TARGET_PAGE_MASK;
Peter Maydell5232e4c2016-01-21 14:15:06 +00003577 phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
3578 asidx = cpu_asidx_from_attrs(cpu, attrs);
bellard13eb76e2004-01-24 15:23:36 +00003579 /* if no physical page mapped, return an error */
3580 if (phys_addr == -1) {
3581 return -1;
 }
3582 l = (page + TARGET_PAGE_SIZE) - addr;
3583 if (l > len) {
3584 l = len;
 }
aliguori5e2972f2009-03-28 17:51:36 +00003585 phys_addr += (addr & ~TARGET_PAGE_MASK);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003586 if (is_write) {
Peter Maydell5232e4c2016-01-21 14:15:06 +00003587 cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
3588 phys_addr, buf, l);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003589 } else {
Peter Maydell5232e4c2016-01-21 14:15:06 +00003590 address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
3591 MEMTXATTRS_UNSPECIFIED,
Peter Maydell5c9eb022015-04-26 16:49:24 +01003592 buf, l, 0);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003593 }
bellard13eb76e2004-01-24 15:23:36 +00003594 len -= l;
3595 buf += l;
3596 addr += l;
3597 }
3598 return 0;
3599}
Dr. David Alan Gilbert038629a2015-11-05 18:10:29 +00003600
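/*
 * Illustrative sketch only: debugger-style callers (gdbstub, monitor) go
 * through cpu_memory_rw_debug(), which translates guest-virtual addresses
 * via cpu_get_phys_page_attrs_debug() and allows writes to ROM.  Here it
 * is wrapped to report success as a boolean.
 */
static inline bool example_peek_guest(CPUState *cpu, target_ulong vaddr,
                                      uint8_t *buf, int len)
{
    /* cpu_memory_rw_debug() returns 0 on success, -1 if a page is unmapped */
    return cpu_memory_rw_debug(cpu, vaddr, buf, len, 0) == 0;
}
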
3601/*
3602 * Allows code that needs to deal with migration bitmaps etc to still be built
3603 * target independent.
3604 */
3605size_t qemu_target_page_bits(void)
3606{
3607 return TARGET_PAGE_BITS;
3608}
3609
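/*
 * Illustrative sketch only: target-independent code (e.g. the migration
 * bitmap handling mentioned above) cannot use TARGET_PAGE_SIZE directly,
 * so it derives the page size from qemu_target_page_bits() at run time.
 */
static inline size_t example_target_page_size(void)
{
    return (size_t)1 << qemu_target_page_bits();
}
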
Paul Brooka68fe892010-03-01 00:08:59 +00003610#endif
bellard13eb76e2004-01-24 15:23:36 +00003611
Blue Swirl8e4a4242013-01-06 18:30:17 +00003612/*
3613 * A helper function for the _utterly broken_ virtio device model to find out if
3614 * it's running on a big endian machine. Don't do this at home kids!
3615 */
Greg Kurz98ed8ec2014-06-24 19:26:29 +02003616bool target_words_bigendian(void);
3617bool target_words_bigendian(void)
Blue Swirl8e4a4242013-01-06 18:30:17 +00003618{
3619#if defined(TARGET_WORDS_BIGENDIAN)
3620 return true;
3621#else
3622 return false;
3623#endif
3624}
3625
Wen Congyang76f35532012-05-07 12:04:18 +08003626#ifndef CONFIG_USER_ONLY
Avi Kivitya8170e52012-10-23 12:30:10 +02003627bool cpu_physical_memory_is_io(hwaddr phys_addr)
Wen Congyang76f35532012-05-07 12:04:18 +08003628{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003629 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003630 hwaddr l = 1;
Paolo Bonzini41063e12015-03-18 14:21:43 +01003631 bool res;
Wen Congyang76f35532012-05-07 12:04:18 +08003632
Paolo Bonzini41063e12015-03-18 14:21:43 +01003633 rcu_read_lock();
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003634 mr = address_space_translate(&address_space_memory,
3635 phys_addr, &phys_addr, &l, false);
Wen Congyang76f35532012-05-07 12:04:18 +08003636
Paolo Bonzini41063e12015-03-18 14:21:43 +01003637 res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
3638 rcu_read_unlock();
3639 return res;
Wen Congyang76f35532012-05-07 12:04:18 +08003640}
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003641
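/*
 * Illustrative sketch only: a guest-memory dumping loop might use
 * cpu_physical_memory_is_io() to skip pages that are backed by device
 * MMIO rather than RAM or ROMD regions.  The address range is arbitrary.
 */
static inline size_t example_count_io_pages(hwaddr start, hwaddr end)
{
    size_t n = 0;
    hwaddr a;

    for (a = start; a < end; a += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_is_io(a)) {
            n++;
        }
    }
    return n;
}
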
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003642int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003643{
3644 RAMBlock *block;
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003645 int ret = 0;
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003646
Mike Day0dc3f442013-09-05 14:41:35 -04003647 rcu_read_lock();
3648 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003649 ret = func(block->idstr, block->host, block->offset,
3650 block->used_length, opaque);
3651 if (ret) {
3652 break;
3653 }
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003654 }
Mike Day0dc3f442013-09-05 14:41:35 -04003655 rcu_read_unlock();
Dr. David Alan Gilberte3807052015-05-21 13:24:13 +01003656 return ret;
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003657}
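
/*
 * Illustrative sketch only: a RAMBlockIterFunc callback is handed each
 * block's idstr, host pointer, offset and used length, and a non-zero
 * return value stops the walk early, as in the loop above.  The name
 * filter passed through opaque is hypothetical, e.g.:
 *     qemu_ram_foreach_block(example_find_block, (void *)"vga");
 */
static int example_find_block(const char *idstr, void *host_addr,
                              ram_addr_t offset, ram_addr_t length,
                              void *opaque)
{
    if (strstr(idstr, (const char *)opaque)) {
        return 1;   /* stop iterating once a matching block is found */
    }
    return 0;
}
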
Peter Maydellec3f8c92013-06-27 20:53:38 +01003658#endif