/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#ifndef _WIN32
#endif

#include "qemu/cutils.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "hw/qdev-core.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#include "hw/xen/xen.h"
#endif
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "hw/hw.h"
#include "exec/memory.h"
#include "exec/ioport.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "exec/log.h"

#include "migration/vmstate.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
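
/* Worked example: with 4 KiB pages (TARGET_PAGE_BITS == 12),
 * P_L2_LEVELS == ((64 - 12 - 1) / 9) + 1 == 6, i.e. six tree levels of
 * 512 entries each cover the 52-bit page-frame-number space.
 */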

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

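/* A subpage splits a single target page among several MemoryRegions:
 * sub_section maps each byte offset within the page to the index of
 * the section that claims it.
 */
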
#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    static unsigned alloc_hint = 16;
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, alloc_hint);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
        alloc_hint = map->nodes_nb_alloc;
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

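/* phys_page_set_level() fills the tree for the page range
 * [*index, *index + *nb): a run that is aligned to this level's step and
 * at least one full step long becomes a leaf entry here (skip = 0);
 * anything smaller recurses one level down.
 */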
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non-leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes);
    }
}

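/* Example: two chained single-child nodes with skip == 1 each collapse
 * into one entry with skip == 2, so a later lookup crosses both levels
 * in a single step.
 */
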
static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return section->size.hi ||
           range_covers_byte(section->offset_within_address_space,
                             section->size.lo, addr);
}

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = atomic_read(&d->mru_section);
    subpage_t *subpage;
    bool update;

    if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
        section_covers_addr(section, addr)) {
        update = false;
    } else {
        section = phys_page_find(d->phys_map, addr, d->map.nodes,
                                 d->map.sections);
        update = true;
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    if (update) {
        atomic_set(&d->mru_section, section);
    }
    return section;
}

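/* The mru_section field acts as a single-entry cache in front of the
 * radix-tree walk; the atomic_read/atomic_set pair above keeps updating
 * it safe from within an RCU read-side critical section.
 */
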
/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions. When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly. For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine. If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}

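/* Each loop iteration above may hop through one IOMMU: the IOTLB entry
 * rewrites the address into the target address space and clamps *plen to
 * the translated range, so chains of IOMMUs are resolved until a plain
 * (non-IOMMU) MemoryRegion is reached.
 */
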
/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

Juan Quintelae7f4eff2009-09-10 03:04:33 +0200474{
Andreas Färber259186a2013-01-17 18:51:17 +0100475 CPUState *cpu = opaque;
Juan Quintelae7f4eff2009-09-10 03:04:33 +0200476
aurel323098dba2009-03-07 21:28:24 +0000477 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
478 version_id is increased. */
Andreas Färber259186a2013-01-17 18:51:17 +0100479 cpu->interrupt_request &= ~0x01;
Christian Borntraegerc01a71c2014-03-17 17:13:12 +0100480 tlb_flush(cpu, 1);
pbrook9656f322008-07-01 20:01:19 +0000481
482 return 0;
483}
Juan Quintelae7f4eff2009-09-10 03:04:33 +0200484
Pavel Dovgaluk6c3bff02014-07-31 09:41:17 +0400485static int cpu_common_pre_load(void *opaque)
486{
487 CPUState *cpu = opaque;
488
Paolo Bonziniadee6422014-12-19 12:53:14 +0100489 cpu->exception_index = -1;
Pavel Dovgaluk6c3bff02014-07-31 09:41:17 +0400490
491 return 0;
492}
493
494static bool cpu_common_exception_index_needed(void *opaque)
495{
496 CPUState *cpu = opaque;
497
Paolo Bonziniadee6422014-12-19 12:53:14 +0100498 return tcg_enabled() && cpu->exception_index != -1;
Pavel Dovgaluk6c3bff02014-07-31 09:41:17 +0400499}
500
501static const VMStateDescription vmstate_cpu_common_exception_index = {
502 .name = "cpu_common/exception_index",
503 .version_id = 1,
504 .minimum_version_id = 1,
Juan Quintela5cd8cad2014-09-23 14:09:54 +0200505 .needed = cpu_common_exception_index_needed,
Pavel Dovgaluk6c3bff02014-07-31 09:41:17 +0400506 .fields = (VMStateField[]) {
507 VMSTATE_INT32(exception_index, CPUState),
508 VMSTATE_END_OF_LIST()
509 }
510};
511
Andrey Smetaninbac05aa2015-07-03 15:01:44 +0300512static bool cpu_common_crash_occurred_needed(void *opaque)
513{
514 CPUState *cpu = opaque;
515
516 return cpu->crash_occurred;
517}
518
519static const VMStateDescription vmstate_cpu_common_crash_occurred = {
520 .name = "cpu_common/crash_occurred",
521 .version_id = 1,
522 .minimum_version_id = 1,
523 .needed = cpu_common_crash_occurred_needed,
524 .fields = (VMStateField[]) {
525 VMSTATE_BOOL(crash_occurred, CPUState),
526 VMSTATE_END_OF_LIST()
527 }
528};
529
Andreas Färber1a1562f2013-06-17 04:09:11 +0200530const VMStateDescription vmstate_cpu_common = {
Juan Quintelae7f4eff2009-09-10 03:04:33 +0200531 .name = "cpu_common",
532 .version_id = 1,
533 .minimum_version_id = 1,
Pavel Dovgaluk6c3bff02014-07-31 09:41:17 +0400534 .pre_load = cpu_common_pre_load,
Juan Quintelae7f4eff2009-09-10 03:04:33 +0200535 .post_load = cpu_common_post_load,
Juan Quintela35d08452014-04-16 16:01:33 +0200536 .fields = (VMStateField[]) {
Andreas Färber259186a2013-01-17 18:51:17 +0100537 VMSTATE_UINT32(halted, CPUState),
538 VMSTATE_UINT32(interrupt_request, CPUState),
Juan Quintelae7f4eff2009-09-10 03:04:33 +0200539 VMSTATE_END_OF_LIST()
Pavel Dovgaluk6c3bff02014-07-31 09:41:17 +0400540 },
Juan Quintela5cd8cad2014-09-23 14:09:54 +0200541 .subsections = (const VMStateDescription*[]) {
542 &vmstate_cpu_common_exception_index,
Andrey Smetaninbac05aa2015-07-03 15:01:44 +0300543 &vmstate_cpu_common_crash_occurred,
Juan Quintela5cd8cad2014-09-23 14:09:54 +0200544 NULL
Juan Quintelae7f4eff2009-09-10 03:04:33 +0200545 }
546};
Andreas Färber1a1562f2013-06-17 04:09:11 +0200547
pbrook9656f322008-07-01 20:01:19 +0000548#endif
549
CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
#endif

void cpu_exec_exit(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cpu_list_remove(cpu);

    if (cc->vmsd != NULL) {
        vmstate_unregister(NULL, cc->vmsd, cpu);
    }
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
    }
}

void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc ATTRIBUTE_UNUSED = CPU_GET_CLASS(cpu);
    Error *local_err ATTRIBUTE_UNUSED = NULL;

    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();

    /* This is a softmmu CPU object, so create a property for it
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
                             (Object **)&cpu->memory,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));
#endif

    cpu_list_add(cpu);

#ifndef CONFIG_USER_ONLY
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
    }
#endif
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    MemTxAttrs attrs;
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}

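/* Example: a 4-byte watchpoint at [0x1000, 0x1003] matches a 4-byte
 * access at 0x1002 (wpend = 0x1003, addrend = 0x1005), since neither
 * range starts beyond the end of the other.
 */
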
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *     rcu_read_lock()
     *     read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *     rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    bool dirty = false;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
                                              offset, num);
        page += num;
    }

    rcu_read_unlock();

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}

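/* The dirty bitmap is kept in fixed-size DirtyMemoryBlocks chunks
 * published via RCU, so the loop above clears at most one chunk per
 * iteration without holding the ramlist lock.
 */
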
/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }
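    /* At this point iotlb encodes everything in one hwaddr: for RAM it is
     * the page-aligned ram_addr ORed with a special section index in the
     * low bits (phys_section_add() asserts indices stay below
     * TARGET_PAGE_SIZE); for MMIO it is the section's index into the
     * dispatch map plus the offset within the region.
     */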

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this. Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}


static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

Avi Kivityac1970f2012-10-03 16:22:53 +02001141static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001142{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001143 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini00752702013-05-29 12:13:54 +02001144 AddressSpaceDispatch *d = as->next_dispatch;
Paolo Bonzini99b9cc02013-05-27 13:18:01 +02001145 MemoryRegionSection now = *section, remain = *section;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001146 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001147
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001148 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1149 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1150 - now.offset_within_address_space;
1151
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001152 now.size = int128_min(int128_make64(left), now.size);
Avi Kivityac1970f2012-10-03 16:22:53 +02001153 register_subpage(d, &now);
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001154 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001155 now.size = int128_zero();
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001156 }
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001157 while (int128_ne(remain.size, now.size)) {
1158 remain.size = int128_sub(remain.size, now.size);
1159 remain.offset_within_address_space += int128_get64(now.size);
1160 remain.offset_within_region += int128_get64(now.size);
Tyler Hall69b67642012-07-25 18:45:04 -04001161 now = remain;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001162 if (int128_lt(remain.size, page_size)) {
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001163 register_subpage(d, &now);
Hu Tao88266242013-08-29 18:21:16 +08001164 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001165 now.size = page_size;
Avi Kivityac1970f2012-10-03 16:22:53 +02001166 register_subpage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001167 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001168 now.size = int128_and(now.size, int128_neg(page_size));
Avi Kivityac1970f2012-10-03 16:22:53 +02001169 register_multipage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001170 }
Avi Kivity0f0cb162012-02-13 17:14:32 +02001171 }
1172}
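
/*
 * Worked example for mem_add() (illustrative, assuming 4 KiB target
 * pages): registering a section covering [0x1800, 0x5400) is split into
 * a subpage head [0x1800, 0x2000), three full pages [0x2000, 0x5000)
 * handled by register_multipage(), and a subpage tail [0x5000, 0x5400).
 * Sections that start and end on page boundaries never create subpages.
 */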

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path,
                            Error **errp)
{
    bool unlink_on_error = false;
    char *filename;
    char *sanitized_name;
    char *c;
    void *area = MAP_FAILED;
    int fd = -1;

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        error_setg(errp,
                   "host lacks kvm mmu notifiers, -mem-path unsupported");
        return NULL;
    }

    for (;;) {
        fd = open(path, O_RDWR);
        if (fd >= 0) {
            /* @path names an existing file, use it */
            break;
        }
        if (errno == ENOENT) {
            /* @path names a file that doesn't exist, create it */
            fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
            if (fd >= 0) {
                unlink_on_error = true;
                break;
            }
        } else if (errno == EISDIR) {
            /* @path names a directory, create a file there */
            /* Make name safe to use with mkstemp by replacing '/' with '_'. */
            sanitized_name = g_strdup(memory_region_name(block->mr));
            for (c = sanitized_name; *c != '\0'; c++) {
                if (*c == '/') {
                    *c = '_';
                }
            }

            filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
                                       sanitized_name);
            g_free(sanitized_name);

            fd = mkstemp(filename);
            if (fd >= 0) {
                unlink(filename);
                g_free(filename);
                break;
            }
            g_free(filename);
        }
        if (errno != EEXIST && errno != EINTR) {
            error_setg_errno(errp, errno,
                             "can't open backing store %s for guest RAM",
                             path);
            goto error;
        }
        /*
         * Try again on EINTR and EEXIST.  The latter happens when
         * something else creates the file between our two open().
         */
    }

    block->page_size = qemu_fd_getpagesize(fd);
    block->mr->align = block->page_size;
#if defined(__s390x__)
    if (kvm_enabled()) {
        block->mr->align = MAX(block->mr->align, QEMU_VMALLOC_ALIGN);
    }
#endif

    if (memory < block->page_size) {
        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
                   "or larger than page size 0x%zx",
                   memory, block->page_size);
        goto error;
    }

    memory = ROUND_UP(memory, block->page_size);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory)) {
        perror("ftruncate");
    }

    area = qemu_ram_mmap(fd, memory, block->mr->align,
                         block->flags & RAM_SHARED);
    if (area == MAP_FAILED) {
        error_setg_errno(errp, errno,
                         "unable to map backing store for guest RAM");
        goto error;
    }

    if (mem_prealloc) {
        os_mem_prealloc(fd, area, memory, errp);
        if (errp && *errp) {
            goto error;
        }
    }

    block->fd = fd;
    return area;

error:
    if (area != MAP_FAILED) {
        qemu_ram_munmap(area, memory);
    }
    if (unlink_on_error) {
        unlink(path);
    }
    if (fd != -1) {
        close(fd);
    }
    return NULL;
}
#endif
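
/*
 * The open() loop in file_ram_alloc() handles three -mem-path cases:
 * @path is an existing file (reused as-is), a nonexistent file (created
 * with O_CREAT | O_EXCL), or a directory (an unlinked temporary file is
 * created inside it).  EEXIST and EINTR restart the loop, so a race with
 * another process creating the same file resolves to the "existing
 * file" case on the next iteration.
 */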

/* Called with the ramlist lock held.  */
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    assert(size != 0); /* it would hand out same offset multiple times */

    if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
        return 0;
    }

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->max_length;

        QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
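
/*
 * find_ram_offset() is a best-fit search: for each block it measures the
 * gap up to the closest block that starts after it and keeps the
 * smallest gap that still fits.  Illustrative example: with blocks at
 * [0, 0x1000) and [0x3000, 0x4000), a request for 0x1000 bytes returns
 * 0x1000 (the 0x2000-byte hole between the blocks) rather than 0x4000,
 * because that hole is smaller yet still large enough.
 */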
1351
Juan Quintela652d7ec2012-07-20 10:37:54 +02001352ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001353{
Alex Williamsond17b5282010-06-25 11:08:38 -06001354 RAMBlock *block;
1355 ram_addr_t last = 0;
1356
Mike Day0dc3f442013-09-05 14:41:35 -04001357 rcu_read_lock();
1358 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001359 last = MAX(last, block->offset + block->max_length);
Mike Day0d53d9f2015-01-21 13:45:24 +01001360 }
Mike Day0dc3f442013-09-05 14:41:35 -04001361 rcu_read_unlock();
Alex Williamsond17b5282010-06-25 11:08:38 -06001362 return last;
1363}
1364
Jason Baronddb97f12012-08-02 15:44:16 -04001365static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1366{
1367 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001368
1369 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
Marcel Apfelbaum47c8ca52015-02-04 17:43:54 +02001370 if (!machine_dump_guest_core(current_machine)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001371 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1372 if (ret) {
1373 perror("qemu_madvise");
1374 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1375 "but dump_guest_core=off specified\n");
1376 }
1377 }
1378}
1379
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001380const char *qemu_ram_get_idstr(RAMBlock *rb)
1381{
1382 return rb->idstr;
1383}
1384
Mike Dayae3a7042013-09-05 14:41:35 -04001385/* Called with iothread lock held. */
Gongleifa53a0e2016-05-10 10:04:59 +08001386void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
Hu Tao20cfe882014-04-02 15:13:26 +08001387{
Gongleifa53a0e2016-05-10 10:04:59 +08001388 RAMBlock *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001389
Avi Kivityc5705a72011-12-20 15:59:12 +02001390 assert(new_block);
1391 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001392
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001393 if (dev) {
1394 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001395 if (id) {
1396 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001397 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001398 }
1399 }
1400 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1401
Gongleiab0a9952016-05-10 10:05:00 +08001402 rcu_read_lock();
Mike Day0dc3f442013-09-05 14:41:35 -04001403 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Gongleifa53a0e2016-05-10 10:04:59 +08001404 if (block != new_block &&
1405 !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001406 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1407 new_block->idstr);
1408 abort();
1409 }
1410 }
Mike Day0dc3f442013-09-05 14:41:35 -04001411 rcu_read_unlock();
Avi Kivityc5705a72011-12-20 15:59:12 +02001412}
1413
Mike Dayae3a7042013-09-05 14:41:35 -04001414/* Called with iothread lock held. */
Gongleifa53a0e2016-05-10 10:04:59 +08001415void qemu_ram_unset_idstr(RAMBlock *block)
Hu Tao20cfe882014-04-02 15:13:26 +08001416{
Mike Dayae3a7042013-09-05 14:41:35 -04001417 /* FIXME: arch_init.c assumes that this is not called throughout
1418 * migration. Ignore the problem since hot-unplug during migration
1419 * does not work anyway.
1420 */
Hu Tao20cfe882014-04-02 15:13:26 +08001421 if (block) {
1422 memset(block->idstr, 0, sizeof(block->idstr));
1423 }
1424}
1425
Dr. David Alan Gilbert863e9622016-09-29 20:09:37 +01001426size_t qemu_ram_pagesize(RAMBlock *rb)
1427{
1428 return rb->page_size;
1429}
1430
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001431static int memory_try_enable_merging(void *addr, size_t len)
1432{
Marcel Apfelbaum75cc7f02015-02-04 17:43:55 +02001433 if (!machine_mem_merge(current_machine)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001434 /* disabled by the user */
1435 return 0;
1436 }
1437
1438 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1439}
1440
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001441/* Only legal before guest might have detected the memory size: e.g. on
1442 * incoming migration, or right after reset.
1443 *
1444 * As memory core doesn't know how is memory accessed, it is up to
1445 * resize callback to update device state and/or add assertions to detect
1446 * misuse, if necessary.
1447 */
Gongleifa53a0e2016-05-10 10:04:59 +08001448int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001449{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001450 assert(block);
1451
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001452 newsize = HOST_PAGE_ALIGN(newsize);
Michael S. Tsirkin129ddaf2015-02-17 10:15:30 +01001453
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001454 if (block->used_length == newsize) {
1455 return 0;
1456 }
1457
1458 if (!(block->flags & RAM_RESIZEABLE)) {
1459 error_setg_errno(errp, EINVAL,
1460 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1461 " in != 0x" RAM_ADDR_FMT, block->idstr,
1462 newsize, block->used_length);
1463 return -EINVAL;
1464 }
1465
1466 if (block->max_length < newsize) {
1467 error_setg_errno(errp, EINVAL,
1468 "Length too large: %s: 0x" RAM_ADDR_FMT
1469 " > 0x" RAM_ADDR_FMT, block->idstr,
1470 newsize, block->max_length);
1471 return -EINVAL;
1472 }
1473
1474 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1475 block->used_length = newsize;
Paolo Bonzini58d27072015-03-23 11:56:01 +01001476 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1477 DIRTY_CLIENTS_ALL);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001478 memory_region_set_size(block->mr, newsize);
1479 if (block->resized) {
1480 block->resized(block->idstr, newsize, block->host);
1481 }
1482 return 0;
1483}
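
/*
 * A resizeable block reserves max_length up front but exposes only
 * used_length to the guest; qemu_ram_resize() merely moves used_length
 * within [0, max_length] and re-dirties the block so migration notices
 * the change.  Illustrative example (sizes made up): a block created via
 * qemu_ram_alloc_resizeable() with 16MB used and 64MB max can grow to
 * any host-page-aligned size up to 64MB without remapping, while a
 * request beyond 64MB fails with -EINVAL.
 */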

/* Called with ram_list.mutex held */
static void dirty_memory_extend(ram_addr_t old_ram_size,
                                ram_addr_t new_ram_size)
{
    ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
                                             DIRTY_MEMORY_BLOCK_SIZE);
    ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
                                             DIRTY_MEMORY_BLOCK_SIZE);
    int i;

    /* Only need to extend if block count increased */
    if (new_num_blocks <= old_num_blocks) {
        return;
    }

    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
        DirtyMemoryBlocks *old_blocks;
        DirtyMemoryBlocks *new_blocks;
        int j;

        old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
        new_blocks = g_malloc(sizeof(*new_blocks) +
                              sizeof(new_blocks->blocks[0]) * new_num_blocks);

        if (old_num_blocks) {
            memcpy(new_blocks->blocks, old_blocks->blocks,
                   old_num_blocks * sizeof(old_blocks->blocks[0]));
        }

        for (j = old_num_blocks; j < new_num_blocks; j++) {
            new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
        }

        atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);

        if (old_blocks) {
            g_free_rcu(old_blocks, rcu);
        }
    }
}
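
/*
 * This is the classic RCU publish pattern: build the larger array of
 * bitmap pointers on the side, copy the old pointers over (readers keep
 * using the same bitmaps, so no dirty bits are lost), publish the new
 * array with atomic_rcu_set(), and reclaim only the old array, not the
 * bitmaps, after a grace period via g_free_rcu().  Readers that fetched
 * the old pointer with atomic_rcu_read() therefore always see a
 * consistent array.
 */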

static void ram_block_add(RAMBlock *new_block, Error **errp)
{
    RAMBlock *block;
    RAMBlock *last_block = NULL;
    ram_addr_t old_ram_size, new_ram_size;
    Error *err = NULL;

    old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;

    qemu_mutex_lock_ramlist();
    new_block->offset = find_ram_offset(new_block->max_length);

    if (!new_block->host) {
        if (xen_enabled()) {
            xen_ram_alloc(new_block->offset, new_block->max_length,
                          new_block->mr, &err);
            if (err) {
                error_propagate(errp, err);
                qemu_mutex_unlock_ramlist();
                return;
            }
        } else {
            new_block->host = phys_mem_alloc(new_block->max_length,
                                             &new_block->mr->align);
            if (!new_block->host) {
                error_setg_errno(errp, errno,
                                 "cannot set up guest memory '%s'",
                                 memory_region_name(new_block->mr));
                qemu_mutex_unlock_ramlist();
                return;
            }
            memory_try_enable_merging(new_block->host, new_block->max_length);
        }
    }

    new_ram_size = MAX(old_ram_size,
              (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
    if (new_ram_size > old_ram_size) {
        migration_bitmap_extend(old_ram_size, new_ram_size);
        dirty_memory_extend(old_ram_size, new_ram_size);
    }
    /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
     * QLIST (which has an RCU-friendly variant) does not have insertion at
     * tail, so save the last element in last_block.
     */
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        last_block = block;
        if (block->max_length < new_block->max_length) {
            break;
        }
    }
    if (block) {
        QLIST_INSERT_BEFORE_RCU(block, new_block, next);
    } else if (last_block) {
        QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
    } else { /* list is empty */
        QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
    }
    ram_list.mru_block = NULL;

    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    qemu_mutex_unlock_ramlist();

    cpu_physical_memory_set_dirty_range(new_block->offset,
                                        new_block->used_length,
                                        DIRTY_CLIENTS_ALL);

    if (new_block->host) {
        qemu_ram_setup_dump(new_block->host, new_block->max_length);
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
        /* MADV_DONTFORK is also needed by KVM in absence of synchronous MMU */
        qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
    }
}
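
/*
 * The biggest-first ordering above is presumably a lookup optimisation
 * rather than a correctness requirement: host-pointer and ram_addr
 * lookups walk this list linearly, and large blocks such as main RAM
 * are hit far more often than small ones such as option ROMs, so
 * keeping them near the head shortens the common walk.
 */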

#ifdef __linux__
RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                   bool share, const char *mem_path,
                                   Error **errp)
{
    RAMBlock *new_block;
    Error *local_err = NULL;

    if (xen_enabled()) {
        error_setg(errp, "-mem-path not supported with Xen");
        return NULL;
    }

    if (phys_mem_alloc != qemu_anon_ram_alloc) {
        /*
         * file_ram_alloc() needs to allocate just like
         * phys_mem_alloc, but we haven't bothered to provide
         * a hook there.
         */
        error_setg(errp,
                   "-mem-path not supported with this accelerator");
        return NULL;
    }

    size = HOST_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->used_length = size;
    new_block->max_length = size;
    new_block->flags = share ? RAM_SHARED : 0;
    new_block->host = file_ram_alloc(new_block, size,
                                     mem_path, errp);
    if (!new_block->host) {
        g_free(new_block);
        return NULL;
    }

    ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return NULL;
    }
    return new_block;
}
#endif

static
RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
                                  void (*resized)(const char*,
                                                  uint64_t length,
                                                  void *host),
                                  void *host, bool resizeable,
                                  MemoryRegion *mr, Error **errp)
{
    RAMBlock *new_block;
    Error *local_err = NULL;

    size = HOST_PAGE_ALIGN(size);
    max_size = HOST_PAGE_ALIGN(max_size);
    new_block = g_malloc0(sizeof(*new_block));
    new_block->mr = mr;
    new_block->resized = resized;
    new_block->used_length = size;
    new_block->max_length = max_size;
    assert(max_size >= size);
    new_block->fd = -1;
    new_block->page_size = getpagesize();
    new_block->host = host;
    if (host) {
        new_block->flags |= RAM_PREALLOC;
    }
    if (resizeable) {
        new_block->flags |= RAM_RESIZEABLE;
    }
    ram_block_add(new_block, &local_err);
    if (local_err) {
        g_free(new_block);
        error_propagate(errp, local_err);
        return NULL;
    }
    return new_block;
}

RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                  MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
}

RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
}

RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
                                    void (*resized)(const char*,
                                                    uint64_t length,
                                                    void *host),
                                    MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
}

static void reclaim_ramblock(RAMBlock *block)
{
    if (block->flags & RAM_PREALLOC) {
        ;
    } else if (xen_enabled()) {
        xen_invalidate_map_cache_entry(block->host);
#ifndef _WIN32
    } else if (block->fd >= 0) {
        qemu_ram_munmap(block->host, block->max_length);
        close(block->fd);
#endif
    } else {
        qemu_anon_ram_free(block->host, block->max_length);
    }
    g_free(block);
}

void qemu_ram_free(RAMBlock *block)
{
    if (!block) {
        return;
    }

    qemu_mutex_lock_ramlist();
    QLIST_REMOVE_RCU(block, next);
    ram_list.mru_block = NULL;
    /* Write list before version */
    smp_wmb();
    ram_list.version++;
    call_rcu(block, reclaim_ramblock, rcu);
    qemu_mutex_unlock_ramlist();
}
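
/*
 * Note that unlinking and reclaiming are deliberately decoupled:
 * qemu_ram_free() only removes the block from the RCU list, while
 * reclaim_ramblock() runs after a grace period via call_rcu(), so a
 * concurrent reader still iterating ram_list.blocks can safely
 * dereference the block until its read-side critical section ends.
 */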

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->max_length) {
            vaddr = ramblock_ptr(block, offset);
            if (block->flags & RAM_PREALLOC) {
                ;
            } else if (xen_enabled()) {
                abort();
            } else {
                flags = MAP_FIXED;
                if (block->fd >= 0) {
                    flags |= (block->flags & RAM_SHARED ?
                              MAP_SHARED : MAP_PRIVATE);
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, block->fd, offset);
                } else {
                    /*
                     * Remap needs to match alloc.  Accelerators that
                     * set phys_mem_alloc never remap.  If they did,
                     * we'd need a remap hook here.
                     */
                    assert(phys_mem_alloc == qemu_anon_ram_alloc);

                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
        }
    }
}
#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * This should not be used for general purpose DMA.  Use address_space_map
 * or address_space_rw instead.  For local memory (e.g. video ram) that the
 * device owns, use memory_region_get_ram_ptr.
 *
 * Called within RCU critical section.
 */
void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
{
    RAMBlock *block = ram_block;

    if (block == NULL) {
        block = qemu_get_ram_block(addr);
        addr -= block->offset;
    }

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map until the end of the page.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, 0, 0);
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1);
    }
    return ramblock_ptr(block, addr);
}

/* Return a host pointer to guest's ram.  Similar to qemu_map_ram_ptr
 * but takes a size argument.
 *
 * Called within RCU critical section.
 */
static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
                                 hwaddr *size)
{
    RAMBlock *block = ram_block;
    if (*size == 0) {
        return NULL;
    }

    if (block == NULL) {
        block = qemu_get_ram_block(addr);
        addr -= block->offset;
    }
    *size = MIN(*size, block->max_length - addr);

    if (xen_enabled() && block->host == NULL) {
        /* We need to check if the requested address is in the RAM
         * because we don't want to map the entire memory in QEMU.
         * In that case just map the requested area.
         */
        if (block->offset == 0) {
            return xen_map_cache(addr, *size, 1);
        }

        block->host = xen_map_cache(block->offset, block->max_length, 1);
    }

    return ramblock_ptr(block, addr);
}

/*
 * Translates a host ptr back to a RAMBlock and an offset in that RAMBlock.
 *
 * ptr: Host pointer to look up
 * round_offset: If true round the result offset down to a page boundary
 * *offset: set to result offset within the RAMBlock
 *
 * Returns: RAMBlock (or NULL if not found)
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore.  If the caller is not within an RCU critical section and
 * does not hold the iothread lock, it must have other means of protecting the
 * pointer, such as a reference to the region that includes the incoming
 * ram_addr_t.
 */
RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
                                   ram_addr_t *offset)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        ram_addr_t ram_addr;
        rcu_read_lock();
        ram_addr = xen_ram_addr_from_mapcache(ptr);
        block = qemu_get_ram_block(ram_addr);
        if (block) {
            *offset = ram_addr - block->offset;
        }
        rcu_read_unlock();
        return block;
    }

    rcu_read_lock();
    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && block->host && host - block->host < block->max_length) {
        goto found;
    }

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->max_length) {
            goto found;
        }
    }

    rcu_read_unlock();
    return NULL;

found:
    *offset = (host - block->host);
    if (round_offset) {
        *offset &= TARGET_PAGE_MASK;
    }
    rcu_read_unlock();
    return block;
}
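
/*
 * Typical use (illustrative snippet): a host pointer coming back from a
 * TLB entry is turned into (block, offset) again with
 *
 *     ram_addr_t offset;
 *     RAMBlock *rb = qemu_ram_block_from_host(ptr, true, &offset);
 *
 * qemu_ram_addr_from_host() below is the wrapper that folds the result
 * back into a single ram_addr_t.
 */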

/*
 * Finds the named RAMBlock
 *
 * name: The name of RAMBlock to find
 *
 * Returns: RAMBlock (or NULL if not found)
 */
RAMBlock *qemu_ram_block_by_name(const char *name)
{
    RAMBlock *block;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (!strcmp(name, block->idstr)) {
            return block;
        }
    }

    return NULL;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host(void *ptr)
{
    RAMBlock *block;
    ram_addr_t offset;

    block = qemu_ram_block_from_host(ptr, false, &offset);
    if (!block) {
        return RAM_ADDR_INVALID;
    }

    return block->offset + offset;
}

/* Called within RCU critical section.  */
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_page_fast(ram_addr, size);
    }
    switch (size) {
    case 1:
        stb_p(qemu_map_ram_ptr(NULL, ram_addr), val);
        break;
    case 2:
        stw_p(qemu_map_ram_ptr(NULL, ram_addr), val);
        break;
    case 4:
        stl_p(qemu_map_ram_ptr(NULL, ram_addr), val);
        break;
    default:
        abort();
    }
    /* Set both VGA and migration bits for simplicity and to remove
     * the notdirty callback faster.
     */
    cpu_physical_memory_set_dirty_range(ram_addr, size,
                                        DIRTY_CLIENTS_NOCODE);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (!cpu_physical_memory_is_clean(ram_addr)) {
        tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
    }
}

static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
                                 unsigned size, bool is_write)
{
    return is_write;
}

static const MemoryRegionOps notdirty_mem_ops = {
    .write = notdirty_mem_write,
    .valid.accepts = notdirty_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
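
/*
 * notdirty_mem_ops backs pages whose DIRTY_MEMORY_CODE bit is still
 * clear: each write is trapped so it can invalidate any TBs translated
 * from the page, perform the store, and mark the page dirty; once the
 * page is no longer clean, tlb_set_dirty() flips the TLB entry back to
 * a plain RAM mapping so later writes take the fast path.  Reads are
 * never trapped, which is why only .write is provided and
 * notdirty_mem_accepts() refuses reads.
 */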

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
{
    CPUState *cpu = current_cpu;
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState *env = cpu->env_ptr;
    target_ulong pc, cs_base;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    uint32_t cpu_flags;

    if (cpu->watchpoint_hit) {
        /* We re-entered the check after replacing the TB.  Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction.  */
        cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, len)
            && (wp->flags & flags)) {
            if (flags == BP_MEM_READ) {
                wp->flags |= BP_WATCHPOINT_HIT_READ;
            } else {
                wp->flags |= BP_WATCHPOINT_HIT_WRITE;
            }
            wp->hitaddr = vaddr;
            wp->hitattrs = attrs;
            if (!cpu->watchpoint_hit) {
                if (wp->flags & BP_CPU &&
                    !cc->debug_check_watchpoint(cpu, wp)) {
                    wp->flags &= ~BP_WATCHPOINT_HIT;
                    continue;
                }
                cpu->watchpoint_hit = wp;
                tb_check_watchpoint(cpu);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    cpu->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(cpu);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
                    cpu_loop_exit_noexc(cpu);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
                                  unsigned size, MemTxAttrs attrs)
{
    MemTxResult res;
    uint64_t data;
    int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
    AddressSpace *as = current_cpu->cpu_ases[asidx].as;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
    switch (size) {
    case 1:
        data = address_space_ldub(as, addr, attrs, &res);
        break;
    case 2:
        data = address_space_lduw(as, addr, attrs, &res);
        break;
    case 4:
        data = address_space_ldl(as, addr, attrs, &res);
        break;
    default: abort();
    }
    *pdata = data;
    return res;
}

static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
                                   uint64_t val, unsigned size,
                                   MemTxAttrs attrs)
{
    MemTxResult res;
    int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
    AddressSpace *as = current_cpu->cpu_ases[asidx].as;

    check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
    switch (size) {
    case 1:
        address_space_stb(as, addr, val, attrs, &res);
        break;
    case 2:
        address_space_stw(as, addr, val, attrs, &res);
        break;
    case 4:
        address_space_stl(as, addr, val, attrs, &res);
        break;
    default: abort();
    }
    return res;
}

static const MemoryRegionOps watch_mem_ops = {
    .read_with_attrs = watch_mem_read,
    .write_with_attrs = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
                                unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];
    MemTxResult res;

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    res = address_space_read(subpage->as, addr + subpage->base,
                             attrs, buf, len);
    if (res) {
        return res;
    }
    switch (len) {
    case 1:
        *data = ldub_p(buf);
        return MEMTX_OK;
    case 2:
        *data = lduw_p(buf);
        return MEMTX_OK;
    case 4:
        *data = ldl_p(buf);
        return MEMTX_OK;
    case 8:
        *data = ldq_p(buf);
        return MEMTX_OK;
    default:
        abort();
    }
}

static MemTxResult subpage_write(void *opaque, hwaddr addr,
                                 uint64_t value, unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    case 8:
        stq_p(buf, value);
        break;
    default:
        abort();
    }
    return address_space_write(subpage->as, addr + subpage->base,
                               attrs, buf, len);
}

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      len, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read_with_attrs = subpage_read,
    .write_with_attrs = subpage_write,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) {
        return -1;
    }
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          NULL, TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}
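
/*
 * A subpage covers a single target page whose bytes belong to different
 * sections.  Illustrative example (assuming 4 KiB pages): if a device
 * occupies only [0x100, 0x2ff] within a page, register_subpage() installs
 * a subpage for that page and subpage_register() fills
 * sub_section[0x100..0x2ff] with the device's section index, while the
 * remaining slots keep PHYS_SECTION_UNASSIGNED from subpage_init().
 * Address translation then re-resolves each access through the slot that
 * matches its offset within the page.
 */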

static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
                              MemoryRegion *mr)
{
    assert(as);
    MemoryRegionSection section = {
        .address_space = as,
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(map, &section);
}

MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
{
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
    AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
    MemoryRegionSection *sections = d->map.sections;

    return sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          NULL, UINT64_MAX);
}

static void mem_begin(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
    uint16_t n;

    n = dummy_section(&d->map, as, &io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&d->map, as, &io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&d->map, as, &io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&d->map, as, &io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
    d->as = as;
    as->next_dispatch = d;
}

static void address_space_dispatch_free(AddressSpaceDispatch *d)
{
    phys_sections_free(&d->map);
    g_free(d);
}

static void mem_commit(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02002299 AddressSpaceDispatch *cur = as->dispatch;
2300 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02002301
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002302 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02002303
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002304 atomic_rcu_set(&as->dispatch, next);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002305 if (cur) {
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002306 call_rcu(cur, address_space_dispatch_free, rcu);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002307 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02002308}
2309
Avi Kivity1d711482012-10-02 18:54:45 +02002310static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02002311{
Peter Maydell32857f42015-10-01 15:29:50 +01002312 CPUAddressSpace *cpuas;
2313 AddressSpaceDispatch *d;
Avi Kivity117712c2012-02-12 21:23:17 +02002314
2315 /* Since each CPU stores RAM addresses in its TLB cache, we must
2316 reset the modified entries. */
Peter Maydell32857f42015-10-01 15:29:50 +01002317 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
2318 cpu_reloading_memory_map();
2319 /* The CPU and TLB are protected by the iothread lock.
2320 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2321 * may have split the RCU critical section.
2322 */
2323 d = atomic_rcu_read(&cpuas->as->dispatch);
2324 cpuas->memory_dispatch = d;
2325 tlb_flush(cpuas->cpu, 1);
Avi Kivity50c1e142012-02-08 21:36:02 +02002326}
2327
Avi Kivityac1970f2012-10-03 16:22:53 +02002328void address_space_init_dispatch(AddressSpace *as)
2329{
Paolo Bonzini00752702013-05-29 12:13:54 +02002330 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002331 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002332 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02002333 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02002334 .region_add = mem_add,
2335 .region_nop = mem_add,
2336 .priority = 0,
2337 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002338 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02002339}
2340
Paolo Bonzini6e48e8f2015-02-10 10:25:44 -07002341void address_space_unregister(AddressSpace *as)
2342{
2343 memory_listener_unregister(&as->dispatch_listener);
2344}
2345
Avi Kivity83f3c252012-10-07 12:59:55 +02002346void address_space_destroy_dispatch(AddressSpace *as)
2347{
2348 AddressSpaceDispatch *d = as->dispatch;
2349
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002350 atomic_rcu_set(&as->dispatch, NULL);
2351 if (d) {
2352 call_rcu(d, address_space_dispatch_free, rcu);
2353 }
Avi Kivity83f3c252012-10-07 12:59:55 +02002354}
2355
Avi Kivity62152b82011-07-26 14:26:14 +03002356static void memory_map_init(void)
2357{
Anthony Liguori7267c092011-08-20 22:09:37 -05002358 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01002359
Paolo Bonzini57271d62013-11-07 17:14:37 +01002360 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002361 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03002362
Anthony Liguori7267c092011-08-20 22:09:37 -05002363 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02002364 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2365 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002366 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity62152b82011-07-26 14:26:14 +03002367}
2368
2369MemoryRegion *get_system_memory(void)
2370{
2371 return system_memory;
2372}
2373
Avi Kivity309cb472011-08-08 16:09:03 +03002374MemoryRegion *get_system_io(void)
2375{
2376 return system_io;
2377}
2378
pbrooke2eef172008-06-08 01:09:01 +00002379#endif /* !defined(CONFIG_USER_ONLY) */
2380
bellard13eb76e2004-01-24 15:23:36 +00002381/* physical memory access (slow version, mainly for debug) */
2382#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02002383int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00002384 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002385{
2386 int l, flags;
2387 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00002388 void * p;
bellard13eb76e2004-01-24 15:23:36 +00002389
2390 while (len > 0) {
2391 page = addr & TARGET_PAGE_MASK;
2392 l = (page + TARGET_PAGE_SIZE) - addr;
2393 if (l > len)
2394 l = len;
2395 flags = page_get_flags(page);
2396 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00002397 return -1;
bellard13eb76e2004-01-24 15:23:36 +00002398 if (is_write) {
2399 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00002400 return -1;
bellard579a97f2007-11-11 14:26:47 +00002401 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002402 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00002403 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002404 memcpy(p, buf, l);
2405 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00002406 } else {
2407 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00002408 return -1;
bellard579a97f2007-11-11 14:26:47 +00002409 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002410 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00002411 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002412 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00002413 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00002414 }
2415 len -= l;
2416 buf += l;
2417 addr += l;
2418 }
Paul Brooka68fe892010-03-01 00:08:59 +00002419 return 0;
bellard13eb76e2004-01-24 15:23:36 +00002420}
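
/*
 * Usage sketch (illustrative, not from the original file): a gdbstub-style
 * debugger reading guest memory through the helper above; "cpu", "pc" and
 * "insn" are hypothetical.
 */
#if 0
uint8_t insn[4];

if (cpu_memory_rw_debug(cpu, pc, insn, sizeof(insn), 0) < 0) {
    /* the page is unmapped or lacks PAGE_READ; report an access error */
}
#endif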
bellard8df1cd02005-01-28 22:37:22 +00002421
bellard13eb76e2004-01-24 15:23:36 +00002422#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002423
Paolo Bonzini845b6212015-03-23 11:45:53 +01002424static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002425 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002426{
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002427 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002428 addr += memory_region_get_ram_addr(mr);
2429
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002430 /* No early return if dirty_log_mask is or becomes 0, because
2431 * cpu_physical_memory_set_dirty_range will still call
2432 * xen_modified_memory.
2433 */
2434 if (dirty_log_mask) {
2435 dirty_log_mask =
2436 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002437 }
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002438 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2439 tb_invalidate_phys_range(addr, addr + length);
2440 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2441 }
2442 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002443}
2444
Richard Henderson23326162013-07-08 14:55:59 -07002445static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02002446{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02002447 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07002448
2449 /* Regions are assumed to support 1-4 byte accesses unless
2450 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07002451 if (access_size_max == 0) {
2452 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002453 }
Richard Henderson23326162013-07-08 14:55:59 -07002454
2455 /* Bound the maximum access by the alignment of the address. */
2456 if (!mr->ops->impl.unaligned) {
2457 unsigned align_size_max = addr & -addr;
2458 if (align_size_max != 0 && align_size_max < access_size_max) {
2459 access_size_max = align_size_max;
2460 }
2461 }
2462
2463 /* Don't attempt accesses larger than the maximum. */
2464 if (l > access_size_max) {
2465 l = access_size_max;
2466 }
Peter Maydell6554f5c2015-07-24 13:33:10 +01002467 l = pow2floor(l);
Richard Henderson23326162013-07-08 14:55:59 -07002468
2469 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002470}
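
/*
 * Worked example for the alignment clamp above (illustrative, not part of
 * the original file): addr & -addr isolates the lowest set bit of the
 * address, i.e. its natural alignment.  For addr == 0x1006 this yields
 * 0x2, so at most a 16-bit access is issued there even if the region
 * allows 8-byte accesses; for addr == 0x1000 it yields 0x1000, which
 * exceeds access_size_max, so the region's own maximum wins instead.
 */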
2471
Jan Kiszka4840f102015-06-18 18:47:22 +02002472static bool prepare_mmio_access(MemoryRegion *mr)
Paolo Bonzini125b3802015-06-18 18:47:21 +02002473{
Jan Kiszka4840f102015-06-18 18:47:22 +02002474 bool unlocked = !qemu_mutex_iothread_locked();
2475 bool release_lock = false;
2476
2477 if (unlocked && mr->global_locking) {
2478 qemu_mutex_lock_iothread();
2479 unlocked = false;
2480 release_lock = true;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002481 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002482 if (mr->flush_coalesced_mmio) {
2483 if (unlocked) {
2484 qemu_mutex_lock_iothread();
2485 }
2486 qemu_flush_coalesced_mmio_buffer();
2487 if (unlocked) {
2488 qemu_mutex_unlock_iothread();
2489 }
2490 }
2491
2492 return release_lock;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002493}
2494
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002495/* Called within RCU critical section. */
2496static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
2497 MemTxAttrs attrs,
2498 const uint8_t *buf,
2499 int len, hwaddr addr1,
2500 hwaddr l, MemoryRegion *mr)
bellard13eb76e2004-01-24 15:23:36 +00002501{
bellard13eb76e2004-01-24 15:23:36 +00002502 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002503 uint64_t val;
Peter Maydell3b643492015-04-26 16:49:23 +01002504 MemTxResult result = MEMTX_OK;
Jan Kiszka4840f102015-06-18 18:47:22 +02002505 bool release_lock = false;
ths3b46e622007-09-17 08:09:54 +00002506
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002507 for (;;) {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002508 if (!memory_access_is_direct(mr, true)) {
2509 release_lock |= prepare_mmio_access(mr);
2510 l = memory_access_size(mr, l, addr1);
2511 /* XXX: could force current_cpu to NULL to avoid
2512 potential bugs */
2513 switch (l) {
2514 case 8:
2515 /* 64 bit write access */
2516 val = ldq_p(buf);
2517 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2518 attrs);
2519 break;
2520 case 4:
2521 /* 32 bit write access */
2522 val = ldl_p(buf);
2523 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2524 attrs);
2525 break;
2526 case 2:
2527 /* 16 bit write access */
2528 val = lduw_p(buf);
2529 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2530 attrs);
2531 break;
2532 case 1:
2533 /* 8 bit write access */
2534 val = ldub_p(buf);
2535 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2536 attrs);
2537 break;
2538 default:
2539 abort();
bellard13eb76e2004-01-24 15:23:36 +00002540 }
2541 } else {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002542 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002543 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002544 memcpy(ptr, buf, l);
2545 invalidate_and_set_dirty(mr, addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002546 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002547
2548 if (release_lock) {
2549 qemu_mutex_unlock_iothread();
2550 release_lock = false;
2551 }
2552
bellard13eb76e2004-01-24 15:23:36 +00002553 len -= l;
2554 buf += l;
2555 addr += l;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002556
2557 if (!len) {
2558 break;
2559 }
2560
2561 l = len;
2562 mr = address_space_translate(as, addr, &addr1, &l, true);
bellard13eb76e2004-01-24 15:23:36 +00002563 }
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002564
Peter Maydell3b643492015-04-26 16:49:23 +01002565 return result;
bellard13eb76e2004-01-24 15:23:36 +00002566}
bellard8df1cd02005-01-28 22:37:22 +00002567
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002568MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2569 const uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002570{
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002571 hwaddr l;
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002572 hwaddr addr1;
2573 MemoryRegion *mr;
2574 MemTxResult result = MEMTX_OK;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002575
2576 if (len > 0) {
2577 rcu_read_lock();
2578 l = len;
2579 mr = address_space_translate(as, addr, &addr1, &l, true);
2580 result = address_space_write_continue(as, addr, attrs, buf, len,
2581 addr1, l, mr);
2582 rcu_read_unlock();
2583 }
2584
2585 return result;
2586}
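
/*
 * Usage sketch (illustrative, not from the original file): a DMA-style
 * write into guest memory through the system address space, using the same
 * MEMTXATTRS_UNSPECIFIED attributes as the helpers below; the 0x1000
 * destination is arbitrary.
 */
#if 0
uint8_t data[4] = { 0xde, 0xad, 0xbe, 0xef };

if (address_space_write(&address_space_memory, 0x1000,
                        MEMTXATTRS_UNSPECIFIED, data,
                        sizeof(data)) != MEMTX_OK) {
    /* at least one slice hit an unassigned or failing region */
}
#endif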
2587
2588/* Called within RCU critical section. */
2589MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
2590 MemTxAttrs attrs, uint8_t *buf,
2591 int len, hwaddr addr1, hwaddr l,
2592 MemoryRegion *mr)
2593{
2594 uint8_t *ptr;
2595 uint64_t val;
2596 MemTxResult result = MEMTX_OK;
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002597 bool release_lock = false;
2598
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002599 for (;;) {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002600 if (!memory_access_is_direct(mr, false)) {
2601 /* I/O case */
2602 release_lock |= prepare_mmio_access(mr);
2603 l = memory_access_size(mr, l, addr1);
2604 switch (l) {
2605 case 8:
2606 /* 64 bit read access */
2607 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2608 attrs);
2609 stq_p(buf, val);
2610 break;
2611 case 4:
2612 /* 32 bit read access */
2613 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2614 attrs);
2615 stl_p(buf, val);
2616 break;
2617 case 2:
2618 /* 16 bit read access */
2619 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2620 attrs);
2621 stw_p(buf, val);
2622 break;
2623 case 1:
2624 /* 8 bit read access */
2625 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2626 attrs);
2627 stb_p(buf, val);
2628 break;
2629 default:
2630 abort();
2631 }
2632 } else {
2633 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002634 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002635 memcpy(buf, ptr, l);
2636 }
2637
2638 if (release_lock) {
2639 qemu_mutex_unlock_iothread();
2640 release_lock = false;
2641 }
2642
2643 len -= l;
2644 buf += l;
2645 addr += l;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002646
2647 if (!len) {
2648 break;
2649 }
2650
2651 l = len;
2652 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002653 }
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002654
2655 return result;
2656}
2657
Paolo Bonzini3cc8f882015-12-09 10:34:13 +01002658MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
2659 MemTxAttrs attrs, uint8_t *buf, int len)
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002660{
2661 hwaddr l;
2662 hwaddr addr1;
2663 MemoryRegion *mr;
2664 MemTxResult result = MEMTX_OK;
2665
2666 if (len > 0) {
2667 rcu_read_lock();
2668 l = len;
2669 mr = address_space_translate(as, addr, &addr1, &l, false);
2670 result = address_space_read_continue(as, addr, attrs, buf, len,
2671 addr1, l, mr);
2672 rcu_read_unlock();
2673 }
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002674
2675 return result;
Avi Kivityac1970f2012-10-03 16:22:53 +02002676}
2677
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002678MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2679 uint8_t *buf, int len, bool is_write)
2680{
2681 if (is_write) {
2682 return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
2683 } else {
2684 return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
2685 }
2686}
Avi Kivityac1970f2012-10-03 16:22:53 +02002687
Avi Kivitya8170e52012-10-23 12:30:10 +02002688void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002689 int len, int is_write)
2690{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002691 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2692 buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002693}
2694
Alexander Graf582b55a2013-12-11 14:17:44 +01002695enum write_rom_type {
2696 WRITE_DATA,
2697 FLUSH_CACHE,
2698};
2699
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002700static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
Alexander Graf582b55a2013-12-11 14:17:44 +01002701 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
bellardd0ecd2a2006-04-23 17:14:48 +00002702{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002703 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002704 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002705 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002706 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002707
Paolo Bonzini41063e12015-03-18 14:21:43 +01002708 rcu_read_lock();
bellardd0ecd2a2006-04-23 17:14:48 +00002709 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002710 l = len;
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002711 mr = address_space_translate(as, addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002712
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002713 if (!(memory_region_is_ram(mr) ||
2714 memory_region_is_romd(mr))) {
Paolo Bonzinib242e0e2015-07-04 00:24:51 +02002715 l = memory_access_size(mr, l, addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002716 } else {
bellardd0ecd2a2006-04-23 17:14:48 +00002717 /* ROM/RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002718 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf582b55a2013-12-11 14:17:44 +01002719 switch (type) {
2720 case WRITE_DATA:
2721 memcpy(ptr, buf, l);
Paolo Bonzini845b6212015-03-23 11:45:53 +01002722 invalidate_and_set_dirty(mr, addr1, l);
Alexander Graf582b55a2013-12-11 14:17:44 +01002723 break;
2724 case FLUSH_CACHE:
2725 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2726 break;
2727 }
bellardd0ecd2a2006-04-23 17:14:48 +00002728 }
2729 len -= l;
2730 buf += l;
2731 addr += l;
2732 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002733 rcu_read_unlock();
bellardd0ecd2a2006-04-23 17:14:48 +00002734}
2735
Alexander Graf582b55a2013-12-11 14:17:44 +01002736/* used for ROM loading: can write to RAM and ROM */
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002737void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
Alexander Graf582b55a2013-12-11 14:17:44 +01002738 const uint8_t *buf, int len)
2739{
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002740 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
Alexander Graf582b55a2013-12-11 14:17:44 +01002741}
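
/*
 * Usage sketch (illustrative, not from the original file): this is the
 * path a firmware loader would take to populate a region that the guest
 * sees as read-only; rom_base, blob and blob_len are hypothetical.
 */
#if 0
cpu_physical_memory_write_rom(&address_space_memory, rom_base,
                              blob, blob_len);
#endif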
2742
2743void cpu_flush_icache_range(hwaddr start, int len)
2744{
2745 /*
2746 * This function should do the same thing as an icache flush that was
2747 * triggered from within the guest. For TCG we are always cache coherent,
2748 * so there is no need to flush anything. For KVM / Xen we need to flush
2749 * the host's instruction cache at least.
2750 */
2751 if (tcg_enabled()) {
2752 return;
2753 }
2754
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002755 cpu_physical_memory_write_rom_internal(&address_space_memory,
2756 start, NULL, len, FLUSH_CACHE);
Alexander Graf582b55a2013-12-11 14:17:44 +01002757}
2758
aliguori6d16c2f2009-01-22 16:59:11 +00002759typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002760 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002761 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002762 hwaddr addr;
2763 hwaddr len;
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002764 bool in_use;
aliguori6d16c2f2009-01-22 16:59:11 +00002765} BounceBuffer;
2766
2767static BounceBuffer bounce;
2768
aliguoriba223c22009-01-22 16:59:16 +00002769typedef struct MapClient {
Fam Zhenge95205e2015-03-16 17:03:37 +08002770 QEMUBH *bh;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002771 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002772} MapClient;
2773
Fam Zheng38e047b2015-03-16 17:03:35 +08002774QemuMutex map_client_list_lock;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002775static QLIST_HEAD(map_client_list, MapClient) map_client_list
2776 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002777
Fam Zhenge95205e2015-03-16 17:03:37 +08002778static void cpu_unregister_map_client_do(MapClient *client)
aliguoriba223c22009-01-22 16:59:16 +00002779{
Blue Swirl72cf2d42009-09-12 07:36:22 +00002780 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002781 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002782}
2783
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002784static void cpu_notify_map_clients_locked(void)
aliguoriba223c22009-01-22 16:59:16 +00002785{
2786 MapClient *client;
2787
Blue Swirl72cf2d42009-09-12 07:36:22 +00002788 while (!QLIST_EMPTY(&map_client_list)) {
2789 client = QLIST_FIRST(&map_client_list);
Fam Zhenge95205e2015-03-16 17:03:37 +08002790 qemu_bh_schedule(client->bh);
2791 cpu_unregister_map_client_do(client);
aliguoriba223c22009-01-22 16:59:16 +00002792 }
2793}
2794
Fam Zhenge95205e2015-03-16 17:03:37 +08002795void cpu_register_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002796{
2797 MapClient *client = g_malloc(sizeof(*client));
2798
Fam Zheng38e047b2015-03-16 17:03:35 +08002799 qemu_mutex_lock(&map_client_list_lock);
Fam Zhenge95205e2015-03-16 17:03:37 +08002800 client->bh = bh;
bellardd0ecd2a2006-04-23 17:14:48 +00002801 QLIST_INSERT_HEAD(&map_client_list, client, link);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002802 if (!atomic_read(&bounce.in_use)) {
2803 cpu_notify_map_clients_locked();
2804 }
Fam Zheng38e047b2015-03-16 17:03:35 +08002805 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002806}
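
/*
 * Sketch of the retry protocol (illustrative, not from the original file):
 * a caller whose address_space_map() failed because the single bounce
 * buffer was busy registers a bottom half and restarts from it, along the
 * lines of dma-helpers.c.  The "Example*" names and state are hypothetical.
 */
#if 0
typedef struct {
    AddressSpace *as;
    hwaddr gpa, len;
    bool is_write;
    QEMUBH *bh;
} ExampleDMAState;                   /* hypothetical per-transfer state */

static void example_start(ExampleDMAState *s);

static void example_retry_cb(void *opaque)
{
    ExampleDMAState *s = opaque;

    qemu_bh_delete(s->bh);           /* the notification is one-shot */
    s->bh = NULL;
    example_start(s);                /* retry the mapping */
}

static void example_start(ExampleDMAState *s)
{
    hwaddr len = s->len;
    void *p = address_space_map(s->as, s->gpa, &len, s->is_write);

    if (p == NULL) {
        /* bounce buffer busy: arrange to be called back when it frees up */
        s->bh = qemu_bh_new(example_retry_cb, s);
        cpu_register_map_client(s->bh);
        return;
    }
    /* ... use up to len bytes at p, then address_space_unmap() ... */
}
#endif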
2807
Fam Zheng38e047b2015-03-16 17:03:35 +08002808void cpu_exec_init_all(void)
2809{
2810 qemu_mutex_init(&ram_list.mutex);
Fam Zheng38e047b2015-03-16 17:03:35 +08002811 io_mem_init();
Paolo Bonzini680a4782015-11-02 09:23:52 +01002812 memory_map_init();
Fam Zheng38e047b2015-03-16 17:03:35 +08002813 qemu_mutex_init(&map_client_list_lock);
2814}
2815
Fam Zhenge95205e2015-03-16 17:03:37 +08002816void cpu_unregister_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002817{
Fam Zhenge95205e2015-03-16 17:03:37 +08002818 MapClient *client;
bellardd0ecd2a2006-04-23 17:14:48 +00002819
Fam Zhenge95205e2015-03-16 17:03:37 +08002820 qemu_mutex_lock(&map_client_list_lock);
2821 QLIST_FOREACH(client, &map_client_list, link) {
2822 if (client->bh == bh) {
2823 cpu_unregister_map_client_do(client);
2824 break;
2825 }
2826 }
2827 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002828}
2829
2830static void cpu_notify_map_clients(void)
2831{
Fam Zheng38e047b2015-03-16 17:03:35 +08002832 qemu_mutex_lock(&map_client_list_lock);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002833 cpu_notify_map_clients_locked();
Fam Zheng38e047b2015-03-16 17:03:35 +08002834 qemu_mutex_unlock(&map_client_list_lock);
aliguori6d16c2f2009-01-22 16:59:11 +00002835}
2836
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002837bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2838{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002839 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002840 hwaddr l, xlat;
2841
Paolo Bonzini41063e12015-03-18 14:21:43 +01002842 rcu_read_lock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002843 while (len > 0) {
2844 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002845 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2846 if (!memory_access_is_direct(mr, is_write)) {
2847 l = memory_access_size(mr, l, addr);
2848 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
 rcu_read_unlock(); /* drop the RCU read lock taken above before returning */
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002849 return false;
2850 }
2851 }
2852
2853 len -= l;
2854 addr += l;
2855 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002856 rcu_read_unlock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002857 return true;
2858}
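
/*
 * Usage sketch (illustrative, not from the original file): probing a
 * guest-supplied window before committing device state to a transfer;
 * "gpa" and "len" are hypothetical.
 */
#if 0
if (!address_space_access_valid(&address_space_memory, gpa, len, true)) {
    /* flag a DMA error instead of starting the transfer */
}
#endif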
2859
aliguori6d16c2f2009-01-22 16:59:11 +00002860/* Map a physical memory region into a host virtual address.
2861 * May map a subset of the requested range, given by and returned in *plen.
2862 * May return NULL if resources needed to perform the mapping are exhausted.
2863 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002864 * Use cpu_register_map_client() to know when retrying the map operation is
2865 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002866 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002867void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002868 hwaddr addr,
2869 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002870 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002871{
Avi Kivitya8170e52012-10-23 12:30:10 +02002872 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002873 hwaddr done = 0;
2874 hwaddr l, xlat, base;
2875 MemoryRegion *mr, *this_mr;
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01002876 void *ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00002877
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002878 if (len == 0) {
2879 return NULL;
2880 }
aliguori6d16c2f2009-01-22 16:59:11 +00002881
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002882 l = len;
Paolo Bonzini41063e12015-03-18 14:21:43 +01002883 rcu_read_lock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002884 mr = address_space_translate(as, addr, &xlat, &l, is_write);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002885
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002886 if (!memory_access_is_direct(mr, is_write)) {
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002887 if (atomic_xchg(&bounce.in_use, true)) {
Paolo Bonzini41063e12015-03-18 14:21:43 +01002888 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002889 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002890 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002891 /* Avoid unbounded allocations */
2892 l = MIN(l, TARGET_PAGE_SIZE);
2893 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002894 bounce.addr = addr;
2895 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002896
2897 memory_region_ref(mr);
2898 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002899 if (!is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002900 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2901 bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002902 }
aliguori6d16c2f2009-01-22 16:59:11 +00002903
Paolo Bonzini41063e12015-03-18 14:21:43 +01002904 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002905 *plen = l;
2906 return bounce.buffer;
2907 }
2908
2909 base = xlat;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002910
2911 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00002912 len -= l;
2913 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002914 done += l;
2915 if (len == 0) {
2916 break;
2917 }
2918
2919 l = len;
2920 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2921 if (this_mr != mr || xlat != base + done) {
2922 break;
2923 }
aliguori6d16c2f2009-01-22 16:59:11 +00002924 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002925
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002926 memory_region_ref(mr);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002927 *plen = done;
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002928 ptr = qemu_ram_ptr_length(mr->ram_block, base, plen);
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01002929 rcu_read_unlock();
2930
2931 return ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00002932}
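
/*
 * Usage sketch (illustrative, not from the original file): zero-copy
 * access to guest memory with the mandatory unmap.  *plen may come back
 * smaller than requested, so real callers loop over the remainder or fall
 * back to address_space_rw(); "gpa" and "size" are hypothetical.
 */
#if 0
hwaddr mapped = size;
void *p = address_space_map(&address_space_memory, gpa, &mapped, true);

if (p) {
    memset(p, 0, mapped);    /* touch only the portion actually mapped */
    address_space_unmap(&address_space_memory, p, mapped, true, mapped);
}
#endif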
2933
Avi Kivityac1970f2012-10-03 16:22:53 +02002934/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002935 * Will also mark the memory as dirty if is_write == 1. access_len gives
2936 * the amount of memory that was actually read or written by the caller.
2937 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002938void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2939 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002940{
2941 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002942 MemoryRegion *mr;
2943 ram_addr_t addr1;
2944
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01002945 mr = memory_region_from_host(buffer, &addr1);
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002946 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00002947 if (is_write) {
Paolo Bonzini845b6212015-03-23 11:45:53 +01002948 invalidate_and_set_dirty(mr, addr1, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002949 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002950 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002951 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002952 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002953 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00002954 return;
2955 }
2956 if (is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002957 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
2958 bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002959 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002960 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002961 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002962 memory_region_unref(bounce.mr);
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002963 atomic_mb_set(&bounce.in_use, false);
aliguoriba223c22009-01-22 16:59:16 +00002964 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002965}
bellardd0ecd2a2006-04-23 17:14:48 +00002966
Avi Kivitya8170e52012-10-23 12:30:10 +02002967void *cpu_physical_memory_map(hwaddr addr,
2968 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002969 int is_write)
2970{
2971 return address_space_map(&address_space_memory, addr, plen, is_write);
2972}
2973
Avi Kivitya8170e52012-10-23 12:30:10 +02002974void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2975 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002976{
2977 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2978}
2979
bellard8df1cd02005-01-28 22:37:22 +00002980/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01002981static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
2982 MemTxAttrs attrs,
2983 MemTxResult *result,
2984 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002985{
bellard8df1cd02005-01-28 22:37:22 +00002986 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002987 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002988 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002989 hwaddr l = 4;
2990 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01002991 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02002992 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00002993
Paolo Bonzini41063e12015-03-18 14:21:43 +01002994 rcu_read_lock();
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002995 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002996 if (l < 4 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02002997 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02002998
bellard8df1cd02005-01-28 22:37:22 +00002999 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003000 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003001#if defined(TARGET_WORDS_BIGENDIAN)
3002 if (endian == DEVICE_LITTLE_ENDIAN) {
3003 val = bswap32(val);
3004 }
3005#else
3006 if (endian == DEVICE_BIG_ENDIAN) {
3007 val = bswap32(val);
3008 }
3009#endif
bellard8df1cd02005-01-28 22:37:22 +00003010 } else {
3011 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003012 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003013 switch (endian) {
3014 case DEVICE_LITTLE_ENDIAN:
3015 val = ldl_le_p(ptr);
3016 break;
3017 case DEVICE_BIG_ENDIAN:
3018 val = ldl_be_p(ptr);
3019 break;
3020 default:
3021 val = ldl_p(ptr);
3022 break;
3023 }
Peter Maydell50013112015-04-26 16:49:24 +01003024 r = MEMTX_OK;
3025 }
3026 if (result) {
3027 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003028 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003029 if (release_lock) {
3030 qemu_mutex_unlock_iothread();
3031 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003032 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003033 return val;
3034}
3035
Peter Maydell50013112015-04-26 16:49:24 +01003036uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
3037 MemTxAttrs attrs, MemTxResult *result)
3038{
3039 return address_space_ldl_internal(as, addr, attrs, result,
3040 DEVICE_NATIVE_ENDIAN);
3041}
3042
3043uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
3044 MemTxAttrs attrs, MemTxResult *result)
3045{
3046 return address_space_ldl_internal(as, addr, attrs, result,
3047 DEVICE_LITTLE_ENDIAN);
3048}
3049
3050uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
3051 MemTxAttrs attrs, MemTxResult *result)
3052{
3053 return address_space_ldl_internal(as, addr, attrs, result,
3054 DEVICE_BIG_ENDIAN);
3055}
3056
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003057uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003058{
Peter Maydell50013112015-04-26 16:49:24 +01003059 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003060}
3061
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003062uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003063{
Peter Maydell50013112015-04-26 16:49:24 +01003064 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003065}
3066
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003067uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003068{
Peter Maydell50013112015-04-26 16:49:24 +01003069 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003070}
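
/*
 * Usage sketch (illustrative, not from the original file): reading a
 * 32-bit little-endian descriptor field independently of host and target
 * endianness; desc_gpa and the +4 offset are hypothetical.
 */
#if 0
uint32_t next = ldl_le_phys(&address_space_memory, desc_gpa + 4);
#endif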
3071
bellard84b7b8e2005-11-28 21:19:04 +00003072/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003073static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
3074 MemTxAttrs attrs,
3075 MemTxResult *result,
3076 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00003077{
bellard84b7b8e2005-11-28 21:19:04 +00003078 uint8_t *ptr;
3079 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003080 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003081 hwaddr l = 8;
3082 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003083 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003084 bool release_lock = false;
bellard84b7b8e2005-11-28 21:19:04 +00003085
Paolo Bonzini41063e12015-03-18 14:21:43 +01003086 rcu_read_lock();
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003087 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003088 false);
3089 if (l < 8 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003090 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003091
bellard84b7b8e2005-11-28 21:19:04 +00003092 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003093 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
Paolo Bonzini968a5622013-05-24 17:58:37 +02003094#if defined(TARGET_WORDS_BIGENDIAN)
3095 if (endian == DEVICE_LITTLE_ENDIAN) {
3096 val = bswap64(val);
3097 }
3098#else
3099 if (endian == DEVICE_BIG_ENDIAN) {
3100 val = bswap64(val);
3101 }
3102#endif
bellard84b7b8e2005-11-28 21:19:04 +00003103 } else {
3104 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003105 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003106 switch (endian) {
3107 case DEVICE_LITTLE_ENDIAN:
3108 val = ldq_le_p(ptr);
3109 break;
3110 case DEVICE_BIG_ENDIAN:
3111 val = ldq_be_p(ptr);
3112 break;
3113 default:
3114 val = ldq_p(ptr);
3115 break;
3116 }
Peter Maydell50013112015-04-26 16:49:24 +01003117 r = MEMTX_OK;
3118 }
3119 if (result) {
3120 *result = r;
bellard84b7b8e2005-11-28 21:19:04 +00003121 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003122 if (release_lock) {
3123 qemu_mutex_unlock_iothread();
3124 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003125 rcu_read_unlock();
bellard84b7b8e2005-11-28 21:19:04 +00003126 return val;
3127}
3128
Peter Maydell50013112015-04-26 16:49:24 +01003129uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
3130 MemTxAttrs attrs, MemTxResult *result)
3131{
3132 return address_space_ldq_internal(as, addr, attrs, result,
3133 DEVICE_NATIVE_ENDIAN);
3134}
3135
3136uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
3137 MemTxAttrs attrs, MemTxResult *result)
3138{
3139 return address_space_ldq_internal(as, addr, attrs, result,
3140 DEVICE_LITTLE_ENDIAN);
3141}
3142
3143uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
3144 MemTxAttrs attrs, MemTxResult *result)
3145{
3146 return address_space_ldq_internal(as, addr, attrs, result,
3147 DEVICE_BIG_ENDIAN);
3148}
3149
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003150uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003151{
Peter Maydell50013112015-04-26 16:49:24 +01003152 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003153}
3154
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003155uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003156{
Peter Maydell50013112015-04-26 16:49:24 +01003157 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003158}
3159
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003160uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003161{
Peter Maydell50013112015-04-26 16:49:24 +01003162 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003163}
3164
bellardaab33092005-10-30 20:48:42 +00003165/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003166uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
3167 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003168{
3169 uint8_t val;
Peter Maydell50013112015-04-26 16:49:24 +01003170 MemTxResult r;
3171
3172 r = address_space_rw(as, addr, attrs, &val, 1, 0);
3173 if (result) {
3174 *result = r;
3175 }
bellardaab33092005-10-30 20:48:42 +00003176 return val;
3177}
3178
Peter Maydell50013112015-04-26 16:49:24 +01003179uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
3180{
3181 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3182}
3183
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003184/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003185static inline uint32_t address_space_lduw_internal(AddressSpace *as,
3186 hwaddr addr,
3187 MemTxAttrs attrs,
3188 MemTxResult *result,
3189 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003190{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003191 uint8_t *ptr;
3192 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003193 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003194 hwaddr l = 2;
3195 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003196 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003197 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003198
Paolo Bonzini41063e12015-03-18 14:21:43 +01003199 rcu_read_lock();
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003200 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003201 false);
3202 if (l < 2 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003203 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003204
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003205 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003206 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003207#if defined(TARGET_WORDS_BIGENDIAN)
3208 if (endian == DEVICE_LITTLE_ENDIAN) {
3209 val = bswap16(val);
3210 }
3211#else
3212 if (endian == DEVICE_BIG_ENDIAN) {
3213 val = bswap16(val);
3214 }
3215#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003216 } else {
3217 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003218 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003219 switch (endian) {
3220 case DEVICE_LITTLE_ENDIAN:
3221 val = lduw_le_p(ptr);
3222 break;
3223 case DEVICE_BIG_ENDIAN:
3224 val = lduw_be_p(ptr);
3225 break;
3226 default:
3227 val = lduw_p(ptr);
3228 break;
3229 }
Peter Maydell50013112015-04-26 16:49:24 +01003230 r = MEMTX_OK;
3231 }
3232 if (result) {
3233 *result = r;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003234 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003235 if (release_lock) {
3236 qemu_mutex_unlock_iothread();
3237 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003238 rcu_read_unlock();
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003239 return val;
bellardaab33092005-10-30 20:48:42 +00003240}
3241
Peter Maydell50013112015-04-26 16:49:24 +01003242uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
3243 MemTxAttrs attrs, MemTxResult *result)
3244{
3245 return address_space_lduw_internal(as, addr, attrs, result,
3246 DEVICE_NATIVE_ENDIAN);
3247}
3248
3249uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
3250 MemTxAttrs attrs, MemTxResult *result)
3251{
3252 return address_space_lduw_internal(as, addr, attrs, result,
3253 DEVICE_LITTLE_ENDIAN);
3254}
3255
3256uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
3257 MemTxAttrs attrs, MemTxResult *result)
3258{
3259 return address_space_lduw_internal(as, addr, attrs, result,
3260 DEVICE_BIG_ENDIAN);
3261}
3262
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003263uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003264{
Peter Maydell50013112015-04-26 16:49:24 +01003265 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003266}
3267
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003268uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003269{
Peter Maydell50013112015-04-26 16:49:24 +01003270 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003271}
3272
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003273uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003274{
Peter Maydell50013112015-04-26 16:49:24 +01003275 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003276}
3277
bellard8df1cd02005-01-28 22:37:22 +00003278/* warning: addr must be aligned. The RAM page is not marked as dirty
3279 and the code inside is not invalidated. This is useful if the dirty
3280 bits are used to track modified PTEs. */
Peter Maydell50013112015-04-26 16:49:24 +01003281void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3282 MemTxAttrs attrs, MemTxResult *result)
bellard8df1cd02005-01-28 22:37:22 +00003283{
bellard8df1cd02005-01-28 22:37:22 +00003284 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003285 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003286 hwaddr l = 4;
3287 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003288 MemTxResult r;
Paolo Bonzini845b6212015-03-23 11:45:53 +01003289 uint8_t dirty_log_mask;
Jan Kiszka4840f102015-06-18 18:47:22 +02003290 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003291
Paolo Bonzini41063e12015-03-18 14:21:43 +01003292 rcu_read_lock();
Edgar E. Iglesias2198a122013-11-28 10:13:41 +01003293 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003294 true);
3295 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003296 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003297
Peter Maydell50013112015-04-26 16:49:24 +01003298 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003299 } else {
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003300 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
bellard8df1cd02005-01-28 22:37:22 +00003301 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003302
Paolo Bonzini845b6212015-03-23 11:45:53 +01003303 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3304 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003305 cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
3306 4, dirty_log_mask);
Peter Maydell50013112015-04-26 16:49:24 +01003307 r = MEMTX_OK;
3308 }
3309 if (result) {
3310 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003311 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003312 if (release_lock) {
3313 qemu_mutex_unlock_iothread();
3314 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003315 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003316}
3317
Peter Maydell50013112015-04-26 16:49:24 +01003318void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3319{
3320 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3321}
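
/*
 * Usage sketch (illustrative, not from the original file): page-table
 * walkers update accessed/dirty bits with the notdirty variant so the
 * write neither marks the page dirty nor invalidates translated code.
 * pte_addr, pte and PG_ACCESSED_MASK follow the target-i386 helpers but
 * are shown here as hypothetical.
 */
#if 0
pte |= PG_ACCESSED_MASK;
stl_phys_notdirty(cs->as, pte_addr, pte);
#endif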
3322
bellard8df1cd02005-01-28 22:37:22 +00003323/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003324static inline void address_space_stl_internal(AddressSpace *as,
3325 hwaddr addr, uint32_t val,
3326 MemTxAttrs attrs,
3327 MemTxResult *result,
3328 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003329{
bellard8df1cd02005-01-28 22:37:22 +00003330 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003331 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003332 hwaddr l = 4;
3333 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003334 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003335 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003336
Paolo Bonzini41063e12015-03-18 14:21:43 +01003337 rcu_read_lock();
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003338 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003339 true);
3340 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003341 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003342
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003343#if defined(TARGET_WORDS_BIGENDIAN)
3344 if (endian == DEVICE_LITTLE_ENDIAN) {
3345 val = bswap32(val);
3346 }
3347#else
3348 if (endian == DEVICE_BIG_ENDIAN) {
3349 val = bswap32(val);
3350 }
3351#endif
Peter Maydell50013112015-04-26 16:49:24 +01003352 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003353 } else {
bellard8df1cd02005-01-28 22:37:22 +00003354 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003355 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003356 switch (endian) {
3357 case DEVICE_LITTLE_ENDIAN:
3358 stl_le_p(ptr, val);
3359 break;
3360 case DEVICE_BIG_ENDIAN:
3361 stl_be_p(ptr, val);
3362 break;
3363 default:
3364 stl_p(ptr, val);
3365 break;
3366 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003367 invalidate_and_set_dirty(mr, addr1, 4);
Peter Maydell50013112015-04-26 16:49:24 +01003368 r = MEMTX_OK;
bellard8df1cd02005-01-28 22:37:22 +00003369 }
Peter Maydell50013112015-04-26 16:49:24 +01003370 if (result) {
3371 *result = r;
3372 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003373 if (release_lock) {
3374 qemu_mutex_unlock_iothread();
3375 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003376 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003377}
3378
3379void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3380 MemTxAttrs attrs, MemTxResult *result)
3381{
3382 address_space_stl_internal(as, addr, val, attrs, result,
3383 DEVICE_NATIVE_ENDIAN);
3384}
3385
3386void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3387 MemTxAttrs attrs, MemTxResult *result)
3388{
3389 address_space_stl_internal(as, addr, val, attrs, result,
3390 DEVICE_LITTLE_ENDIAN);
3391}
3392
3393void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3394 MemTxAttrs attrs, MemTxResult *result)
3395{
3396 address_space_stl_internal(as, addr, val, attrs, result,
3397 DEVICE_BIG_ENDIAN);
bellard8df1cd02005-01-28 22:37:22 +00003398}
3399
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003400void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003401{
Peter Maydell50013112015-04-26 16:49:24 +01003402 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003403}
3404
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003405void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003406{
Peter Maydell50013112015-04-26 16:49:24 +01003407 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003408}
3409
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003410void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003411{
Peter Maydell50013112015-04-26 16:49:24 +01003412 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003413}
3414
bellardaab33092005-10-30 20:48:42 +00003415/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003416void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3417 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003418{
3419 uint8_t v = val;
Peter Maydell50013112015-04-26 16:49:24 +01003420 MemTxResult r;
3421
3422 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3423 if (result) {
3424 *result = r;
3425 }
3426}
3427
3428void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3429{
3430 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003431}
3432
/* warning: addr must be aligned */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

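/*
 * address_space_stw_internal() implements all three 16-bit store
 * flavours.  On the MMIO path it byte-swaps val whenever the requested
 * endianness differs from the target's compile-time endianness before
 * dispatching; on the RAM path it writes through a host pointer with
 * the matching stw_*_p() accessor and marks the page dirty.  The whole
 * operation runs inside an RCU read-side critical section, and
 * prepare_mmio_access() reports whether the iothread lock was taken and
 * therefore must be dropped again before returning.
 */
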
void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

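/*
 * Keeping one internal worker parameterised by enum device_endian and
 * generating the native/little/big entry points as trivial wrappers
 * avoids triplicating the translate/dispatch/unlock logic above.
 */
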
void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* XXX: optimize */
void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = tswap64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_le64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_be64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

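/*
 * The 64-bit stores byte-swap in place (tswap64/cpu_to_le64/cpu_to_be64)
 * and then issue a plain 8-byte address_space_rw(), so a store that
 * lands on MMIO may be split by the memory core rather than dispatched
 * as a single 64-bit access.
 */
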
void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        int asidx;
        MemTxAttrs attrs;

        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
        asidx = cpu_asidx_from_attrs(cpu, attrs);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
                                          phys_addr, buf, l);
        } else {
            address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
                             MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

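/*
 * cpu_memory_rw_debug() walks the guest virtual address range page by
 * page, translating each page through the CPU's MMU state before
 * touching physical memory; writes deliberately go through
 * cpu_physical_memory_write_rom() so that debugger stores can also
 * patch ROM (e.g. to plant software breakpoints).  A typical caller is
 * the gdb stub, along the lines of:
 *
 *     uint8_t buf[4];
 *     if (cpu_memory_rw_debug(cpu, vaddr, buf, sizeof(buf), 0) < 0) {
 *         (report an inaccessible address to the debugger)
 *     }
 */
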
/*
 * Allows code that needs to deal with migration bitmaps, etc., to be
 * built target-independent: TARGET_PAGE_BITS itself is only visible to
 * target-specific code.
 */
size_t qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}

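/*
 * Hypothetical illustration: a target-independent caller can size a
 * dirty bitmap without including any per-target headers:
 *
 *     size_t pages = ram_size >> qemu_target_page_bits();
 */
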
#endif

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

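/*
 * The extra declaration immediately above the definition is presumably
 * there to satisfy compiler prototype warnings without exposing the
 * function in a header, so that new callers do not proliferate.  Virtio
 * uses it (roughly) as:
 *
 *     if (target_words_bigendian()) {
 *         (default the device to big-endian byte order)
 *     }
 */
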
#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}

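/*
 * qemu_ram_foreach_block() invokes func once per RAM block while
 * holding the RCU read lock, so callbacks should not block or try to
 * modify the block list; a non-zero return from func stops the walk
 * and is propagated to the caller.  It exists for migration code
 * (e.g. RDMA migration registers every block's host memory this way).
 */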
int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);
        if (ret) {
            break;
        }
    }
    rcu_read_unlock();
    return ret;
}
#endif