/*
 *  Virtual page mapping
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#ifndef _WIN32
#endif

#include "qemu/cutils.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "hw/qdev-core.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#include "hw/xen/xen.h"
#endif
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "hw/hw.h"
#include "exec/memory.h"
#include "exec/ioport.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "exec/log.h"

#include "migration/vmstate.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits to skip to the next level (in units of L2_SIZE).
     * 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables. */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
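
/* Worked example (illustrative, assuming 4 KiB target pages): with
 * TARGET_PAGE_BITS == 12, P_L2_LEVELS == ((64 - 12 - 1) / 9) + 1 == 6,
 * i.e. six levels of 512-entry tables resolve the 52-bit page frame
 * number.  PHYS_MAP_NODE_NIL is ((uint32_t)~0) >> 6 == 0x3ffffff, the
 * largest value representable in the 26-bit ptr field.
 */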

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    static unsigned alloc_hint = 16;
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, alloc_hint);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
        alloc_hint = map->nodes_nb_alloc;
    }
}
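
/* The static alloc_hint keeps the high-water mark from previous dispatch
 * builds, so a rebuild starts from the last allocation size instead of
 * growing from 16 nodes again through repeated g_renew() calls.
 */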

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
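
/* Usage sketch (illustrative values only): mapping 512 contiguous pages
 * starting at guest-physical 0x200000 to one section would be
 *
 *     phys_page_set(d, 0x200000 >> TARGET_PAGE_BITS, 512, section_index);
 *
 * Because the run is 512-page aligned, phys_page_set_level() records it
 * as a single leaf entry one level up instead of 512 bottom-level entries.
 */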

/* Compact a non-leaf page entry. Simply detect that the entry has a single
 * child, and update our entry so we can skip it and go directly to the
 * destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}
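
/* Illustration: a chain of single-child nodes collapses by summing skip
 * fields, e.g. a parent entry with skip == 1 whose only valid child has
 * skip == 2 becomes one entry with skip == 3 pointing straight at the
 * grandchild node, which phys_page_find() then crosses in a single step.
 */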

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes);
    }
}

static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return section->size.hi ||
           range_covers_byte(section->offset_within_address_space,
                             section->size.lo, addr);
}

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
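
/* Sketch of the walk above (assuming 4 KiB pages, so P_L2_LEVELS == 6):
 * each node consumes lp.skip levels and indexes the next node with
 * (index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1); with no compaction
 * (skip == 1 everywhere) this is a plain six-level, radix-512 lookup.
 */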

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = atomic_read(&d->mru_section);
    subpage_t *subpage;
    bool update;

    if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
        section_covers_addr(section, addr)) {
        update = false;
    } else {
        section = phys_page_find(d->phys_map, addr, d->map.nodes,
                                 d->map.sections);
        update = true;
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    if (update) {
        atomic_set(&d->mru_section, section);
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}
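
/* Note on the loop above: an IOMMU may hand the access off to another
 * address space via iotlb.target_as, so one access can traverse several
 * IOMMUs; the walk stops at the first region without iommu_ops.  *plen
 * is clamped at every hop, so the result never crosses an IOMMU page.
 */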

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};
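
/* The subsections above are only put on the wire when their .needed
 * callback returns true, so destinations that predate exception_index or
 * crash_occurred keep migrating successfully while those fields hold
 * their default values.
 */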

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
#endif

void cpu_exec_exit(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cpu_list_remove(cpu);

    if (cc->vmsd != NULL) {
        vmstate_unregister(NULL, cc->vmsd, cpu);
    }
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
    }
}

void cpu_exec_initfn(CPUState *cpu)
{
    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();

    /* This is a softmmu CPU object, so create a property for it
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
                             (Object **)&cpu->memory,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));
#endif
}

void cpu_exec_realizefn(CPUState *cpu, Error **errp)
{
    CPUClass *cc ATTRIBUTE_UNUSED = CPU_GET_CLASS(cpu);

    cpu_list_add(cpu);

#ifndef CONFIG_USER_ONLY
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
    }
#endif
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    MemTxAttrs attrs;
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (i.e. the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
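
/* Worked example (hypothetical values, 32-bit vaddr): a 4-byte watchpoint
 * at 0xfffffffc gives wpend == 0xffffffff, whereas an exclusive end
 * (wp->vaddr + wp->len) would wrap to 0 and make the comparison reject
 * every access; hence the inclusive-end form above.
 */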

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *     xxx removed from list
     *     rcu_read_lock()
     *     read mru_block
     *                                mru_block = NULL;
     *                                call_rcu(reclaim_ramblock, xxx);
     *                                rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    bool dirty = false;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
                                              offset, num);
        page += num;
    }

    rcu_read_unlock();

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}
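
/* Note on the loop above: the dirty bitmap is sharded into blocks of
 * DIRTY_MEMORY_BLOCK_SIZE pages so that it can be grown under RCU, so
 * each iteration clears at most the remainder of one block; hence the
 * idx/offset/num arithmetic instead of one atomic call over the range.
 */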

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
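
/* Summary of the encoding above: for RAM the iotlb value is the page's
 * ram_addr_t ORed with a flag section (NOTDIRTY or ROM); for MMIO it is
 * the section's index within d->map.sections plus the in-region offset;
 * a matching watchpoint overrides both with PHYS_SECTION_WATCH.
 */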
#endif /* !defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}
1129
1130
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001131static void register_multipage(AddressSpaceDispatch *d,
1132 MemoryRegionSection *section)
bellard33417e72003-08-10 21:47:01 +00001133{
Avi Kivitya8170e52012-10-23 12:30:10 +02001134 hwaddr start_addr = section->offset_within_address_space;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001135 uint16_t section_index = phys_section_add(&d->map, section);
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001136 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1137 TARGET_PAGE_BITS));
Avi Kivitydd811242012-01-02 12:17:03 +02001138
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001139 assert(num_pages);
1140 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
bellard33417e72003-08-10 21:47:01 +00001141}
1142
Avi Kivityac1970f2012-10-03 16:22:53 +02001143static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001144{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001145 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini00752702013-05-29 12:13:54 +02001146 AddressSpaceDispatch *d = as->next_dispatch;
Paolo Bonzini99b9cc02013-05-27 13:18:01 +02001147 MemoryRegionSection now = *section, remain = *section;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001148 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001149
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001150 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1151 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1152 - now.offset_within_address_space;
1153
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001154 now.size = int128_min(int128_make64(left), now.size);
Avi Kivityac1970f2012-10-03 16:22:53 +02001155 register_subpage(d, &now);
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001156 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001157 now.size = int128_zero();
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001158 }
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001159 while (int128_ne(remain.size, now.size)) {
1160 remain.size = int128_sub(remain.size, now.size);
1161 remain.offset_within_address_space += int128_get64(now.size);
1162 remain.offset_within_region += int128_get64(now.size);
Tyler Hall69b67642012-07-25 18:45:04 -04001163 now = remain;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001164 if (int128_lt(remain.size, page_size)) {
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001165 register_subpage(d, &now);
Hu Tao88266242013-08-29 18:21:16 +08001166 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001167 now.size = page_size;
Avi Kivityac1970f2012-10-03 16:22:53 +02001168 register_subpage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001169 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001170 now.size = int128_and(now.size, int128_neg(page_size));
Avi Kivityac1970f2012-10-03 16:22:53 +02001171 register_multipage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001172 }
Avi Kivity0f0cb162012-02-13 17:14:32 +02001173 }
1174}
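/*
 * Worked example (illustrative): with 4 KiB target pages, a section
 * covering [0x1800, 0x4c00) is registered by mem_add() in three steps:
 *
 *   1. head  [0x1800, 0x2000) -> register_subpage()   (unaligned start)
 *   2. body  [0x2000, 0x4000) -> register_multipage() (whole pages)
 *   3. tail  [0x4000, 0x4c00) -> register_subpage()   (partial last page)
 */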
1175
Sheng Yang62a27442010-01-26 19:21:16 +08001176void qemu_flush_coalesced_mmio_buffer(void)
1177{
1178 if (kvm_enabled())
1179 kvm_flush_coalesced_mmio_buffer();
1180}
1181
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001182void qemu_mutex_lock_ramlist(void)
1183{
1184 qemu_mutex_lock(&ram_list.mutex);
1185}
1186
1187void qemu_mutex_unlock_ramlist(void)
1188{
1189 qemu_mutex_unlock(&ram_list.mutex);
1190}
1191
Markus Armbrustere1e84ba2013-07-31 15:11:10 +02001192#ifdef __linux__
Alex Williamson04b16652010-07-02 11:13:17 -06001193static void *file_ram_alloc(RAMBlock *block,
1194 ram_addr_t memory,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001195 const char *path,
1196 Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001197{
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001198 bool unlink_on_error = false;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001199 char *filename;
Peter Feiner8ca761f2013-03-04 13:54:25 -05001200 char *sanitized_name;
1201 char *c;
Igor Mammedov056b68a2016-07-20 11:54:03 +02001202 void *area = MAP_FAILED;
Paolo Bonzini5c3ece72016-03-17 15:53:13 +01001203 int fd = -1;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001204
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001205 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1206 error_setg(errp,
1207 "host lacks kvm mmu notifiers, -mem-path unsupported");
1208 return NULL;
1209 }
1210
1211 for (;;) {
1212 fd = open(path, O_RDWR);
1213 if (fd >= 0) {
1214 /* @path names an existing file, use it */
1215 break;
1216 }
1217 if (errno == ENOENT) {
1218 /* @path names a file that doesn't exist, create it */
1219 fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
1220 if (fd >= 0) {
1221 unlink_on_error = true;
1222 break;
1223 }
1224 } else if (errno == EISDIR) {
1225 /* @path names a directory, create a file there */
1226 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1227 sanitized_name = g_strdup(memory_region_name(block->mr));
1228 for (c = sanitized_name; *c != '\0'; c++) {
1229 if (*c == '/') {
1230 *c = '_';
1231 }
1232 }
1233
1234 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1235 sanitized_name);
1236 g_free(sanitized_name);
1237
1238 fd = mkstemp(filename);
1239 if (fd >= 0) {
1240 unlink(filename);
1241 g_free(filename);
1242 break;
1243 }
1244 g_free(filename);
1245 }
1246 if (errno != EEXIST && errno != EINTR) {
1247 error_setg_errno(errp, errno,
1248 "can't open backing store %s for guest RAM",
1249 path);
1250 goto error;
1251 }
1252 /*
1253 * Try again on EINTR and EEXIST. The latter happens when
1254 * something else creates the file between our two open() calls.
1255 */
1256 }
1257
Dr. David Alan Gilbert863e9622016-09-29 20:09:37 +01001258 block->page_size = qemu_fd_getpagesize(fd);
Haozhong Zhang83606682016-10-24 20:49:37 +08001259 block->mr->align = block->page_size;
1260#if defined(__s390x__)
1261 if (kvm_enabled()) {
1262 block->mr->align = MAX(block->mr->align, QEMU_VMALLOC_ALIGN);
1263 }
1264#endif
Marcelo Tosattic9027602010-03-01 20:25:08 -03001265
Dr. David Alan Gilbert863e9622016-09-29 20:09:37 +01001266 if (memory < block->page_size) {
Hu Tao557529d2014-09-09 13:28:00 +08001267 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
Dr. David Alan Gilbert863e9622016-09-29 20:09:37 +01001268 "or larger than page size 0x%zx",
1269 memory, block->page_size);
Hu Tao557529d2014-09-09 13:28:00 +08001270 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001271 }
1272
Dr. David Alan Gilbert863e9622016-09-29 20:09:37 +01001273 memory = ROUND_UP(memory, block->page_size);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001274
1275 /*
1276 * ftruncate is not supported by hugetlbfs on older
1277 * hosts, so don't bother bailing out on errors.
1278 * If anything goes wrong with it under other filesystems,
1279 * mmap will fail.
1280 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001281 if (ftruncate(fd, memory)) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001282 perror("ftruncate");
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001283 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001284
Dominik Dingeld2f39ad2016-04-25 13:55:38 +02001285 area = qemu_ram_mmap(fd, memory, block->mr->align,
1286 block->flags & RAM_SHARED);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001287 if (area == MAP_FAILED) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001288 error_setg_errno(errp, errno,
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001289 "unable to map backing store for guest RAM");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001290 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001291 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001292
1293 if (mem_prealloc) {
Igor Mammedov056b68a2016-07-20 11:54:03 +02001294 os_mem_prealloc(fd, area, memory, errp);
1295 if (errp && *errp) {
1296 goto error;
1297 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001298 }
1299
Alex Williamson04b16652010-07-02 11:13:17 -06001300 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001301 return area;
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001302
1303error:
Igor Mammedov056b68a2016-07-20 11:54:03 +02001304 if (area != MAP_FAILED) {
1305 qemu_ram_munmap(area, memory);
1306 }
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001307 if (unlink_on_error) {
1308 unlink(path);
1309 }
Paolo Bonzini5c3ece72016-03-17 15:53:13 +01001310 if (fd != -1) {
1311 close(fd);
1312 }
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001313 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001314}
1315#endif
1316
Mike Day0dc3f442013-09-05 14:41:35 -04001317/* Called with the ramlist lock held. */
Alex Williamsond17b5282010-06-25 11:08:38 -06001318static ram_addr_t find_ram_offset(ram_addr_t size)
1319{
Alex Williamson04b16652010-07-02 11:13:17 -06001320 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001321 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001322
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001323 assert(size != 0); /* it would hand out the same offset multiple times */
1324
Mike Day0dc3f442013-09-05 14:41:35 -04001325 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
Alex Williamson04b16652010-07-02 11:13:17 -06001326 return 0;
Mike Day0d53d9f2015-01-21 13:45:24 +01001327 }
Alex Williamson04b16652010-07-02 11:13:17 -06001328
Mike Day0dc3f442013-09-05 14:41:35 -04001329 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001330 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001331
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001332 end = block->offset + block->max_length;
Alex Williamson04b16652010-07-02 11:13:17 -06001333
Mike Day0dc3f442013-09-05 14:41:35 -04001334 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001335 if (next_block->offset >= end) {
1336 next = MIN(next, next_block->offset);
1337 }
1338 }
1339 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001340 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001341 mingap = next - end;
1342 }
1343 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001344
1345 if (offset == RAM_ADDR_MAX) {
1346 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1347 (uint64_t)size);
1348 abort();
1349 }
1350
Alex Williamson04b16652010-07-02 11:13:17 -06001351 return offset;
1352}
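/*
 * Worked example (illustrative): with blocks at [0x0, 0x100000) and
 * [0x300000, 0x400000), find_ram_offset(0x100000) sees the gap
 * [0x100000, 0x300000), which fits and is the smallest such gap, and
 * returns 0x100000 -- a best-fit search over the ends of all blocks.
 */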
1353
Juan Quintela652d7ec2012-07-20 10:37:54 +02001354ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001355{
Alex Williamsond17b5282010-06-25 11:08:38 -06001356 RAMBlock *block;
1357 ram_addr_t last = 0;
1358
Mike Day0dc3f442013-09-05 14:41:35 -04001359 rcu_read_lock();
1360 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001361 last = MAX(last, block->offset + block->max_length);
Mike Day0d53d9f2015-01-21 13:45:24 +01001362 }
Mike Day0dc3f442013-09-05 14:41:35 -04001363 rcu_read_unlock();
Alex Williamsond17b5282010-06-25 11:08:38 -06001364 return last;
1365}
1366
Jason Baronddb97f12012-08-02 15:44:16 -04001367static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1368{
1369 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001370
1371 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core */
Marcel Apfelbaum47c8ca52015-02-04 17:43:54 +02001372 if (!machine_dump_guest_core(current_machine)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001373 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1374 if (ret) {
1375 perror("qemu_madvise");
1376 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1377 "but dump_guest_core=off specified\n");
1378 }
1379 }
1380}
1381
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001382const char *qemu_ram_get_idstr(RAMBlock *rb)
1383{
1384 return rb->idstr;
1385}
1386
Mike Dayae3a7042013-09-05 14:41:35 -04001387/* Called with iothread lock held. */
Gongleifa53a0e2016-05-10 10:04:59 +08001388void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
Hu Tao20cfe882014-04-02 15:13:26 +08001389{
Gongleifa53a0e2016-05-10 10:04:59 +08001390 RAMBlock *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001391
Avi Kivityc5705a72011-12-20 15:59:12 +02001392 assert(new_block);
1393 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001394
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001395 if (dev) {
1396 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001397 if (id) {
1398 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001399 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001400 }
1401 }
1402 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1403
Gongleiab0a9952016-05-10 10:05:00 +08001404 rcu_read_lock();
Mike Day0dc3f442013-09-05 14:41:35 -04001405 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Gongleifa53a0e2016-05-10 10:04:59 +08001406 if (block != new_block &&
1407 !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001408 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1409 new_block->idstr);
1410 abort();
1411 }
1412 }
Mike Day0dc3f442013-09-05 14:41:35 -04001413 rcu_read_unlock();
Avi Kivityc5705a72011-12-20 15:59:12 +02001414}
1415
Mike Dayae3a7042013-09-05 14:41:35 -04001416/* Called with iothread lock held. */
Gongleifa53a0e2016-05-10 10:04:59 +08001417void qemu_ram_unset_idstr(RAMBlock *block)
Hu Tao20cfe882014-04-02 15:13:26 +08001418{
Mike Dayae3a7042013-09-05 14:41:35 -04001419 /* FIXME: arch_init.c assumes that this is not called throughout
1420 * migration. Ignore the problem since hot-unplug during migration
1421 * does not work anyway.
1422 */
Hu Tao20cfe882014-04-02 15:13:26 +08001423 if (block) {
1424 memset(block->idstr, 0, sizeof(block->idstr));
1425 }
1426}
1427
Dr. David Alan Gilbert863e9622016-09-29 20:09:37 +01001428size_t qemu_ram_pagesize(RAMBlock *rb)
1429{
1430 return rb->page_size;
1431}
1432
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001433static int memory_try_enable_merging(void *addr, size_t len)
1434{
Marcel Apfelbaum75cc7f02015-02-04 17:43:55 +02001435 if (!machine_mem_merge(current_machine)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001436 /* disabled by the user */
1437 return 0;
1438 }
1439
1440 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1441}
1442
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001443/* Only legal before the guest might have detected the memory size: e.g. on
1444 * incoming migration, or right after reset.
1445 *
1446 * As the memory core doesn't know how memory is accessed, it is up to the
1447 * resize callback to update device state and/or add assertions to detect
1448 * misuse, if necessary.
1449 */
Gongleifa53a0e2016-05-10 10:04:59 +08001450int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001451{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001452 assert(block);
1453
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001454 newsize = HOST_PAGE_ALIGN(newsize);
Michael S. Tsirkin129ddaf2015-02-17 10:15:30 +01001455
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001456 if (block->used_length == newsize) {
1457 return 0;
1458 }
1459
1460 if (!(block->flags & RAM_RESIZEABLE)) {
1461 error_setg_errno(errp, EINVAL,
1462 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1463 " in != 0x" RAM_ADDR_FMT, block->idstr,
1464 newsize, block->used_length);
1465 return -EINVAL;
1466 }
1467
1468 if (block->max_length < newsize) {
1469 error_setg_errno(errp, EINVAL,
1470 "Length too large: %s: 0x" RAM_ADDR_FMT
1471 " > 0x" RAM_ADDR_FMT, block->idstr,
1472 newsize, block->max_length);
1473 return -EINVAL;
1474 }
1475
1476 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1477 block->used_length = newsize;
Paolo Bonzini58d27072015-03-23 11:56:01 +01001478 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1479 DIRTY_CLIENTS_ALL);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001480 memory_region_set_size(block->mr, newsize);
1481 if (block->resized) {
1482 block->resized(block->idstr, newsize, block->host);
1483 }
1484 return 0;
1485}
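/*
 * Usage sketch (illustrative): "blk" and the target size are
 * hypothetical. Typically done on incoming migration, before the
 * guest has seen the memory size, per the comment above.
 */
static void example_grow_ram(RAMBlock *blk, Error **errp)
{
    if (qemu_ram_resize(blk, 512 * 1024 * 1024, errp) < 0) {
        /* errp explains why: block not resizeable, or above max_length */
    }
}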
1486
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001487/* Called with ram_list.mutex held */
1488static void dirty_memory_extend(ram_addr_t old_ram_size,
1489 ram_addr_t new_ram_size)
1490{
1491 ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
1492 DIRTY_MEMORY_BLOCK_SIZE);
1493 ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
1494 DIRTY_MEMORY_BLOCK_SIZE);
1495 int i;
1496
1497 /* Only need to extend if block count increased */
1498 if (new_num_blocks <= old_num_blocks) {
1499 return;
1500 }
1501
1502 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1503 DirtyMemoryBlocks *old_blocks;
1504 DirtyMemoryBlocks *new_blocks;
1505 int j;
1506
1507 old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
1508 new_blocks = g_malloc(sizeof(*new_blocks) +
1509 sizeof(new_blocks->blocks[0]) * new_num_blocks);
1510
1511 if (old_num_blocks) {
1512 memcpy(new_blocks->blocks, old_blocks->blocks,
1513 old_num_blocks * sizeof(old_blocks->blocks[0]));
1514 }
1515
1516 for (j = old_num_blocks; j < new_num_blocks; j++) {
1517 new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
1518 }
1519
1520 atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
1521
1522 if (old_blocks) {
1523 g_free_rcu(old_blocks, rcu);
1524 }
1525 }
1526}
1527
Fam Zheng528f46a2016-03-01 14:18:18 +08001528static void ram_block_add(RAMBlock *new_block, Error **errp)
Avi Kivityc5705a72011-12-20 15:59:12 +02001529{
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001530 RAMBlock *block;
Mike Day0d53d9f2015-01-21 13:45:24 +01001531 RAMBlock *last_block = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001532 ram_addr_t old_ram_size, new_ram_size;
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001533 Error *err = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001534
1535 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001536
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001537 qemu_mutex_lock_ramlist();
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001538 new_block->offset = find_ram_offset(new_block->max_length);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001539
1540 if (!new_block->host) {
1541 if (xen_enabled()) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001542 xen_ram_alloc(new_block->offset, new_block->max_length,
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001543 new_block->mr, &err);
1544 if (err) {
1545 error_propagate(errp, err);
1546 qemu_mutex_unlock_ramlist();
Paolo Bonzini39c350e2016-03-09 18:14:01 +01001547 return;
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001548 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001549 } else {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001550 new_block->host = phys_mem_alloc(new_block->max_length,
Igor Mammedova2b257d2014-10-31 16:38:37 +00001551 &new_block->mr->align);
Markus Armbruster39228252013-07-31 15:11:11 +02001552 if (!new_block->host) {
Hu Taoef701d72014-09-09 13:27:54 +08001553 error_setg_errno(errp, errno,
1554 "cannot set up guest memory '%s'",
1555 memory_region_name(new_block->mr));
1556 qemu_mutex_unlock_ramlist();
Paolo Bonzini39c350e2016-03-09 18:14:01 +01001557 return;
Markus Armbruster39228252013-07-31 15:11:11 +02001558 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001559 memory_try_enable_merging(new_block->host, new_block->max_length);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001560 }
1561 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001562
Li Zhijiandd631692015-07-02 20:18:06 +08001563 new_ram_size = MAX(old_ram_size,
1564 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1565 if (new_ram_size > old_ram_size) {
1566 migration_bitmap_extend(old_ram_size, new_ram_size);
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001567 dirty_memory_extend(old_ram_size, new_ram_size);
Li Zhijiandd631692015-07-02 20:18:06 +08001568 }
Mike Day0d53d9f2015-01-21 13:45:24 +01001569 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1570 * QLIST (which has an RCU-friendly variant) does not have insertion at
1571 * tail, so save the last element in last_block.
1572 */
Mike Day0dc3f442013-09-05 14:41:35 -04001573 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Mike Day0d53d9f2015-01-21 13:45:24 +01001574 last_block = block;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001575 if (block->max_length < new_block->max_length) {
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001576 break;
1577 }
1578 }
1579 if (block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001580 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001581 } else if (last_block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001582 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001583 } else { /* list is empty */
Mike Day0dc3f442013-09-05 14:41:35 -04001584 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001585 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001586 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001587
Mike Day0dc3f442013-09-05 14:41:35 -04001588 /* Write list before version */
1589 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001590 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001591 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001592
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001593 cpu_physical_memory_set_dirty_range(new_block->offset,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001594 new_block->used_length,
1595 DIRTY_CLIENTS_ALL);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001596
Paolo Bonzinia904c912015-01-21 16:18:35 +01001597 if (new_block->host) {
1598 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1599 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
Cao jinc2cd6272016-09-12 14:34:56 +08001600 /* MADV_DONTFORK is also needed by KVM in absence of synchronous MMU */
Paolo Bonzinia904c912015-01-21 16:18:35 +01001601 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001602 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001603}
1604
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001605#ifdef __linux__
Fam Zheng528f46a2016-03-01 14:18:18 +08001606RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
1607 bool share, const char *mem_path,
1608 Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001609{
1610 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001611 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001612
1613 if (xen_enabled()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001614 error_setg(errp, "-mem-path not supported with Xen");
Fam Zheng528f46a2016-03-01 14:18:18 +08001615 return NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001616 }
1617
1618 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1619 /*
1620 * file_ram_alloc() needs to allocate just like
1621 * phys_mem_alloc, but we haven't bothered to provide
1622 * a hook there.
1623 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001624 error_setg(errp,
1625 "-mem-path not supported with this accelerator");
Fam Zheng528f46a2016-03-01 14:18:18 +08001626 return NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001627 }
1628
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001629 size = HOST_PAGE_ALIGN(size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001630 new_block = g_malloc0(sizeof(*new_block));
1631 new_block->mr = mr;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001632 new_block->used_length = size;
1633 new_block->max_length = size;
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001634 new_block->flags = share ? RAM_SHARED : 0;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001635 new_block->host = file_ram_alloc(new_block, size,
1636 mem_path, errp);
1637 if (!new_block->host) {
1638 g_free(new_block);
Fam Zheng528f46a2016-03-01 14:18:18 +08001639 return NULL;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001640 }
1641
Fam Zheng528f46a2016-03-01 14:18:18 +08001642 ram_block_add(new_block, &local_err);
Hu Taoef701d72014-09-09 13:27:54 +08001643 if (local_err) {
1644 g_free(new_block);
1645 error_propagate(errp, local_err);
Fam Zheng528f46a2016-03-01 14:18:18 +08001646 return NULL;
Hu Taoef701d72014-09-09 13:27:54 +08001647 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001648 return new_block;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001649}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001650#endif
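/*
 * Usage sketch (illustrative; "mr" and the hugetlbfs path are
 * hypothetical). On success the block is backed by the file mapping
 * set up in file_ram_alloc() above:
 *
 *     RAMBlock *rb = qemu_ram_alloc_from_file(1024 * 1024 * 1024, mr,
 *                                             true, "/dev/hugepages",
 *                                             &error_fatal);
 */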
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001651
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001652static
Fam Zheng528f46a2016-03-01 14:18:18 +08001653RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1654 void (*resized)(const char*,
1655 uint64_t length,
1656 void *host),
1657 void *host, bool resizeable,
1658 MemoryRegion *mr, Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001659{
1660 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001661 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001662
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001663 size = HOST_PAGE_ALIGN(size);
1664 max_size = HOST_PAGE_ALIGN(max_size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001665 new_block = g_malloc0(sizeof(*new_block));
1666 new_block->mr = mr;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001667 new_block->resized = resized;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001668 new_block->used_length = size;
1669 new_block->max_length = max_size;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001670 assert(max_size >= size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001671 new_block->fd = -1;
Dr. David Alan Gilbert863e9622016-09-29 20:09:37 +01001672 new_block->page_size = getpagesize();
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001673 new_block->host = host;
1674 if (host) {
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001675 new_block->flags |= RAM_PREALLOC;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001676 }
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001677 if (resizeable) {
1678 new_block->flags |= RAM_RESIZEABLE;
1679 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001680 ram_block_add(new_block, &local_err);
Hu Taoef701d72014-09-09 13:27:54 +08001681 if (local_err) {
1682 g_free(new_block);
1683 error_propagate(errp, local_err);
Fam Zheng528f46a2016-03-01 14:18:18 +08001684 return NULL;
Hu Taoef701d72014-09-09 13:27:54 +08001685 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001686 return new_block;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001687}
1688
Fam Zheng528f46a2016-03-01 14:18:18 +08001689RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001690 MemoryRegion *mr, Error **errp)
1691{
1692 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1693}
1694
Fam Zheng528f46a2016-03-01 14:18:18 +08001695RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
pbrook94a6b542009-04-11 17:15:54 +00001696{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001697 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1698}
1699
Fam Zheng528f46a2016-03-01 14:18:18 +08001700RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001701 void (*resized)(const char*,
1702 uint64_t length,
1703 void *host),
1704 MemoryRegion *mr, Error **errp)
1705{
1706 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
pbrook94a6b542009-04-11 17:15:54 +00001707}
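/*
 * Illustrative sketch: a resized() callback as wired through
 * qemu_ram_alloc_internal() above. The name and body are hypothetical;
 * the signature is the one this file requires.
 */
static void example_ram_resized(const char *id, uint64_t length, void *host)
{
    /* e.g. refresh device state that mirrors the RAM size */
}
/*
 * RAMBlock *rb = qemu_ram_alloc_resizeable(64 * 1024 * 1024,
 *                                          256 * 1024 * 1024,
 *                                          example_ram_resized, mr, errp);
 */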
bellarde9a1ab12007-02-08 23:08:38 +00001708
Paolo Bonzini43771532013-09-09 17:58:40 +02001709static void reclaim_ramblock(RAMBlock *block)
1710{
1711 if (block->flags & RAM_PREALLOC) {
1712 ;
1713 } else if (xen_enabled()) {
1714 xen_invalidate_map_cache_entry(block->host);
1715#ifndef _WIN32
1716 } else if (block->fd >= 0) {
Eduardo Habkost2f3a2bb2015-11-06 20:11:21 -02001717 qemu_ram_munmap(block->host, block->max_length);
Paolo Bonzini43771532013-09-09 17:58:40 +02001718 close(block->fd);
1719#endif
1720 } else {
1721 qemu_anon_ram_free(block->host, block->max_length);
1722 }
1723 g_free(block);
1724}
1725
Fam Zhengf1060c52016-03-01 14:18:22 +08001726void qemu_ram_free(RAMBlock *block)
bellarde9a1ab12007-02-08 23:08:38 +00001727{
Marc-André Lureau85bc2a12016-03-29 13:20:51 +02001728 if (!block) {
1729 return;
1730 }
1731
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001732 qemu_mutex_lock_ramlist();
Fam Zhengf1060c52016-03-01 14:18:22 +08001733 QLIST_REMOVE_RCU(block, next);
1734 ram_list.mru_block = NULL;
1735 /* Write list before version */
1736 smp_wmb();
1737 ram_list.version++;
1738 call_rcu(block, reclaim_ramblock, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001739 qemu_mutex_unlock_ramlist();
bellarde9a1ab12007-02-08 23:08:38 +00001740}
1741
Huang Yingcd19cfa2011-03-02 08:56:19 +01001742#ifndef _WIN32
1743void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1744{
1745 RAMBlock *block;
1746 ram_addr_t offset;
1747 int flags;
1748 void *area, *vaddr;
1749
Mike Day0dc3f442013-09-05 14:41:35 -04001750 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001751 offset = addr - block->offset;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001752 if (offset < block->max_length) {
Michael S. Tsirkin1240be22014-11-12 11:44:41 +02001753 vaddr = ramblock_ptr(block, offset);
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001754 if (block->flags & RAM_PREALLOC) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001755 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001756 } else if (xen_enabled()) {
1757 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001758 } else {
1759 flags = MAP_FIXED;
Markus Armbruster3435f392013-07-31 15:11:07 +02001760 if (block->fd >= 0) {
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001761 flags |= (block->flags & RAM_SHARED ?
1762 MAP_SHARED : MAP_PRIVATE);
Markus Armbruster3435f392013-07-31 15:11:07 +02001763 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1764 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001765 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001766 /*
1767 * Remap needs to match alloc. Accelerators that
1768 * set phys_mem_alloc never remap. If they did,
1769 * we'd need a remap hook here.
1770 */
1771 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1772
Huang Yingcd19cfa2011-03-02 08:56:19 +01001773 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1774 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1775 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001776 }
1777 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001778 fprintf(stderr, "Could not remap addr: "
1779 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001780 length, addr);
1781 exit(1);
1782 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001783 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001784 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001785 }
Huang Yingcd19cfa2011-03-02 08:56:19 +01001786 }
1787 }
1788}
1789#endif /* !_WIN32 */
1790
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001791/* Return a host pointer to ram allocated with qemu_ram_alloc.
Mike Dayae3a7042013-09-05 14:41:35 -04001792 * This should not be used for general purpose DMA. Use address_space_map
1793 * or address_space_rw instead. For local memory (e.g. video ram) that the
1794 * device owns, use memory_region_get_ram_ptr.
Mike Day0dc3f442013-09-05 14:41:35 -04001795 *
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001796 * Called within RCU critical section.
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001797 */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001798void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001799{
Gonglei3655cb92016-02-20 10:35:20 +08001800 RAMBlock *block = ram_block;
1801
1802 if (block == NULL) {
1803 block = qemu_get_ram_block(addr);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001804 addr -= block->offset;
Gonglei3655cb92016-02-20 10:35:20 +08001805 }
Mike Dayae3a7042013-09-05 14:41:35 -04001806
1807 if (xen_enabled() && block->host == NULL) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001808 /* We need to check if the requested address is in the RAM
1809 * because we don't want to map the entire memory in QEMU.
1810 * In that case just map until the end of the page.
1811 */
1812 if (block->offset == 0) {
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001813 return xen_map_cache(addr, 0, 0);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001814 }
Mike Dayae3a7042013-09-05 14:41:35 -04001815
1816 block->host = xen_map_cache(block->offset, block->max_length, 1);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001817 }
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001818 return ramblock_ptr(block, addr);
pbrookdc828ca2009-04-09 22:21:07 +00001819}
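/*
 * Usage sketch (illustrative; "block" and "offset" are hypothetical):
 * callers must hold the RCU read lock, as required above.
 *
 *     rcu_read_lock();
 *     void *host = qemu_map_ram_ptr(block, offset);
 *     ... touch host only inside the critical section ...
 *     rcu_read_unlock();
 */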
1820
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001821/* Return a host pointer to guest's ram. Similar to qemu_map_ram_ptr
Mike Dayae3a7042013-09-05 14:41:35 -04001822 * but takes a size argument.
Mike Day0dc3f442013-09-05 14:41:35 -04001823 *
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001824 * Called within RCU critical section.
Mike Dayae3a7042013-09-05 14:41:35 -04001825 */
Gonglei3655cb92016-02-20 10:35:20 +08001826static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
1827 hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001828{
Gonglei3655cb92016-02-20 10:35:20 +08001829 RAMBlock *block = ram_block;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001830 if (*size == 0) {
1831 return NULL;
1832 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001833
Gonglei3655cb92016-02-20 10:35:20 +08001834 if (block == NULL) {
1835 block = qemu_get_ram_block(addr);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001836 addr -= block->offset;
Gonglei3655cb92016-02-20 10:35:20 +08001837 }
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001838 *size = MIN(*size, block->max_length - addr);
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001839
1840 if (xen_enabled() && block->host == NULL) {
1841 /* We need to check if the requested address is in the RAM
1842 * because we don't want to map the entire memory in QEMU.
1843 * In that case just map the requested area.
1844 */
1845 if (block->offset == 0) {
1846 return xen_map_cache(addr, *size, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001847 }
1848
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001849 block->host = xen_map_cache(block->offset, block->max_length, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001850 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001851
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001852 return ramblock_ptr(block, addr);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001853}
1854
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001855/*
1856 * Translates a host ptr back to a RAMBlock and an offset within that
1857 * RAMBlock.
1858 *
1859 * ptr: Host pointer to look up
1860 * round_offset: If true round the result offset down to a page boundary
1861 * *offset: set to result offset within the RAMBlock
1863 *
1864 * Returns: RAMBlock (or NULL if not found)
Mike Dayae3a7042013-09-05 14:41:35 -04001865 *
1866 * By the time this function returns, the returned pointer is not protected
1867 * by RCU anymore. If the caller is not within an RCU critical section and
1868 * does not hold the iothread lock, it must have other means of protecting the
1869 * pointer, such as a reference to the region that includes the incoming
1870 * ram_addr_t.
1871 */
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001872RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001873 ram_addr_t *offset)
pbrook5579c7f2009-04-11 14:47:08 +00001874{
pbrook94a6b542009-04-11 17:15:54 +00001875 RAMBlock *block;
1876 uint8_t *host = ptr;
1877
Jan Kiszka868bb332011-06-21 22:59:09 +02001878 if (xen_enabled()) {
Paolo Bonzinif615f392016-05-26 10:07:50 +02001879 ram_addr_t ram_addr;
Mike Day0dc3f442013-09-05 14:41:35 -04001880 rcu_read_lock();
Paolo Bonzinif615f392016-05-26 10:07:50 +02001881 ram_addr = xen_ram_addr_from_mapcache(ptr);
1882 block = qemu_get_ram_block(ram_addr);
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001883 if (block) {
Anthony PERARDd6b6aec2016-06-09 16:56:17 +01001884 *offset = ram_addr - block->offset;
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001885 }
Mike Day0dc3f442013-09-05 14:41:35 -04001886 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001887 return block;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001888 }
1889
Mike Day0dc3f442013-09-05 14:41:35 -04001890 rcu_read_lock();
1891 block = atomic_rcu_read(&ram_list.mru_block);
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001892 if (block && block->host && host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001893 goto found;
1894 }
1895
Mike Day0dc3f442013-09-05 14:41:35 -04001896 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001897 /* This case applies when the block is not mapped. */
1898 if (block->host == NULL) {
1899 continue;
1900 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001901 if (host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001902 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001903 }
pbrook94a6b542009-04-11 17:15:54 +00001904 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001905
Mike Day0dc3f442013-09-05 14:41:35 -04001906 rcu_read_unlock();
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001907 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001908
1909found:
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001910 *offset = (host - block->host);
1911 if (round_offset) {
1912 *offset &= TARGET_PAGE_MASK;
1913 }
Mike Day0dc3f442013-09-05 14:41:35 -04001914 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001915 return block;
1916}
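/*
 * Usage sketch (illustrative; "host_ptr" is hypothetical). Per the
 * comment above, RCU no longer protects the result on return, so the
 * caller needs its own guarantee that the block stays alive:
 *
 *     ram_addr_t offset;
 *     RAMBlock *rb = qemu_ram_block_from_host(host_ptr, true, &offset);
 *     if (rb) {
 *         ... offset is rounded down to a page boundary here ...
 *     }
 */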
1917
Dr. David Alan Gilberte3dd7492015-11-05 18:10:33 +00001918/*
1919 * Finds the named RAMBlock
1920 *
1921 * name: The name of RAMBlock to find
1922 *
1923 * Returns: RAMBlock (or NULL if not found)
1924 */
1925RAMBlock *qemu_ram_block_by_name(const char *name)
1926{
1927 RAMBlock *block;
1928
1929 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1930 if (!strcmp(name, block->idstr)) {
1931 return block;
1932 }
1933 }
1934
1935 return NULL;
1936}
1937
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001938/* Some of the softmmu routines need to translate from a host pointer
1939 (typically a TLB entry) back to a ram offset. */
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01001940ram_addr_t qemu_ram_addr_from_host(void *ptr)
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001941{
1942 RAMBlock *block;
Paolo Bonzinif615f392016-05-26 10:07:50 +02001943 ram_addr_t offset;
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001944
Paolo Bonzinif615f392016-05-26 10:07:50 +02001945 block = qemu_ram_block_from_host(ptr, false, &offset);
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001946 if (!block) {
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01001947 return RAM_ADDR_INVALID;
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001948 }
1949
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01001950 return block->offset + offset;
Marcelo Tosattie8902612010-10-11 15:31:19 -03001951}
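/*
 * Usage sketch (illustrative): an unknown pointer yields
 * RAM_ADDR_INVALID rather than aborting, so callers must check:
 *
 *     ram_addr_t ra = qemu_ram_addr_from_host(ptr);
 *     if (ra == RAM_ADDR_INVALID) {
 *         ... ptr does not point into guest RAM ...
 *     }
 */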
Alex Williamsonf471a172010-06-11 11:11:42 -06001952
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001953/* Called within RCU critical section. */
Avi Kivitya8170e52012-10-23 12:30:10 +02001954static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001955 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001956{
Juan Quintela52159192013-10-08 12:44:04 +02001957 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001958 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00001959 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001960 switch (size) {
1961 case 1:
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001962 stb_p(qemu_map_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001963 break;
1964 case 2:
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001965 stw_p(qemu_map_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001966 break;
1967 case 4:
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001968 stl_p(qemu_map_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001969 break;
1970 default:
1971 abort();
1972 }
Paolo Bonzini58d27072015-03-23 11:56:01 +01001973 /* Set both VGA and migration bits for simplicity and to remove
1974 * the notdirty callback faster.
1975 */
1976 cpu_physical_memory_set_dirty_range(ram_addr, size,
1977 DIRTY_CLIENTS_NOCODE);
bellardf23db162005-08-21 19:12:28 +00001978 /* we remove the notdirty callback only if the code has been
1979 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02001980 if (!cpu_physical_memory_is_clean(ram_addr)) {
Peter Crosthwaitebcae01e2015-09-10 22:39:42 -07001981 tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
Andreas Färber4917cf42013-05-27 05:17:50 +02001982 }
bellard1ccde1c2004-02-06 19:46:14 +00001983}
1984
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001985static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1986 unsigned size, bool is_write)
1987{
1988 return is_write;
1989}
1990
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001991static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001992 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001993 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001994 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00001995};
1996
pbrook0f459d12008-06-09 00:20:13 +00001997/* Generate a debug exception if a watchpoint has been hit. */
Peter Maydell66b9b432015-04-26 16:49:24 +01001998static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
pbrook0f459d12008-06-09 00:20:13 +00001999{
Andreas Färber93afead2013-08-26 03:41:01 +02002000 CPUState *cpu = current_cpu;
Sergey Fedorov568496c2016-02-11 11:17:32 +00002001 CPUClass *cc = CPU_GET_CLASS(cpu);
Andreas Färber93afead2013-08-26 03:41:01 +02002002 CPUArchState *env = cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00002003 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00002004 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00002005 CPUWatchpoint *wp;
Emilio G. Cota89fee742016-04-07 13:19:22 -04002006 uint32_t cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00002007
Andreas Färberff4700b2013-08-26 18:23:18 +02002008 if (cpu->watchpoint_hit) {
aliguori06d55cc2008-11-18 20:24:06 +00002009 /* We re-entered the check after replacing the TB. Now raise
2010 * the debug interrupt so that it will trigger after the
2011 * current instruction. */
Andreas Färber93afead2013-08-26 03:41:01 +02002012 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00002013 return;
2014 }
Andreas Färber93afead2013-08-26 03:41:01 +02002015 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Andreas Färberff4700b2013-08-26 18:23:18 +02002016 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Peter Maydell05068c02014-09-12 14:06:48 +01002017 if (cpu_watchpoint_address_matches(wp, vaddr, len)
2018 && (wp->flags & flags)) {
Peter Maydell08225672014-09-12 14:06:48 +01002019 if (flags == BP_MEM_READ) {
2020 wp->flags |= BP_WATCHPOINT_HIT_READ;
2021 } else {
2022 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
2023 }
2024 wp->hitaddr = vaddr;
Peter Maydell66b9b432015-04-26 16:49:24 +01002025 wp->hitattrs = attrs;
Andreas Färberff4700b2013-08-26 18:23:18 +02002026 if (!cpu->watchpoint_hit) {
Sergey Fedorov568496c2016-02-11 11:17:32 +00002027 if (wp->flags & BP_CPU &&
2028 !cc->debug_check_watchpoint(cpu, wp)) {
2029 wp->flags &= ~BP_WATCHPOINT_HIT;
2030 continue;
2031 }
Andreas Färberff4700b2013-08-26 18:23:18 +02002032 cpu->watchpoint_hit = wp;
Andreas Färber239c51a2013-09-01 17:12:23 +02002033 tb_check_watchpoint(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002034 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
Andreas Färber27103422013-08-26 08:31:06 +02002035 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +02002036 cpu_loop_exit(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002037 } else {
2038 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
Andreas Färber648f0342013-09-01 17:43:17 +02002039 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
Peter Maydell6886b982016-05-17 15:18:04 +01002040 cpu_loop_exit_noexc(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002041 }
aliguori06d55cc2008-11-18 20:24:06 +00002042 }
aliguori6e140f22008-11-18 20:37:55 +00002043 } else {
2044 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00002045 }
2046 }
2047}
2048
pbrook6658ffb2007-03-16 23:58:11 +00002049/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2050 so these check for a hit then pass through to the normal out-of-line
2051 phys routines. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002052static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
2053 unsigned size, MemTxAttrs attrs)
pbrook6658ffb2007-03-16 23:58:11 +00002054{
Peter Maydell66b9b432015-04-26 16:49:24 +01002055 MemTxResult res;
2056 uint64_t data;
Peter Maydell79ed0412016-01-21 14:15:06 +00002057 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2058 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
pbrook6658ffb2007-03-16 23:58:11 +00002059
Peter Maydell66b9b432015-04-26 16:49:24 +01002060 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
Avi Kivity1ec9b902012-01-02 12:47:48 +02002061 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04002062 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002063 data = address_space_ldub(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002064 break;
2065 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002066 data = address_space_lduw(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002067 break;
2068 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002069 data = address_space_ldl(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002070 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02002071 default: abort();
2072 }
Peter Maydell66b9b432015-04-26 16:49:24 +01002073 *pdata = data;
2074 return res;
2075}
2076
2077static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
2078 uint64_t val, unsigned size,
2079 MemTxAttrs attrs)
2080{
2081 MemTxResult res;
Peter Maydell79ed0412016-01-21 14:15:06 +00002082 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2083 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
Peter Maydell66b9b432015-04-26 16:49:24 +01002084
2085 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
2086 switch (size) {
2087 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002088 address_space_stb(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002089 break;
2090 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002091 address_space_stw(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002092 break;
2093 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002094 address_space_stl(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002095 break;
2096 default: abort();
2097 }
2098 return res;
pbrook6658ffb2007-03-16 23:58:11 +00002099}
2100
Avi Kivity1ec9b902012-01-02 12:47:48 +02002101static const MemoryRegionOps watch_mem_ops = {
Peter Maydell66b9b432015-04-26 16:49:24 +01002102 .read_with_attrs = watch_mem_read,
2103 .write_with_attrs = watch_mem_write,
Avi Kivity1ec9b902012-01-02 12:47:48 +02002104 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00002105};
pbrook6658ffb2007-03-16 23:58:11 +00002106
Peter Maydellf25a49e2015-04-26 16:49:24 +01002107static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2108 unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002109{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002110 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002111 uint8_t buf[8];
Peter Maydell5c9eb022015-04-26 16:49:24 +01002112 MemTxResult res;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002113
blueswir1db7b5422007-05-26 17:36:03 +00002114#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002115 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002116 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00002117#endif
Peter Maydell5c9eb022015-04-26 16:49:24 +01002118 res = address_space_read(subpage->as, addr + subpage->base,
2119 attrs, buf, len);
2120 if (res) {
2121 return res;
Peter Maydellf25a49e2015-04-26 16:49:24 +01002122 }
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002123 switch (len) {
2124 case 1:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002125 *data = ldub_p(buf);
2126 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002127 case 2:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002128 *data = lduw_p(buf);
2129 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002130 case 4:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002131 *data = ldl_p(buf);
2132 return MEMTX_OK;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002133 case 8:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002134 *data = ldq_p(buf);
2135 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002136 default:
2137 abort();
2138 }
blueswir1db7b5422007-05-26 17:36:03 +00002139}
2140
Peter Maydellf25a49e2015-04-26 16:49:24 +01002141static MemTxResult subpage_write(void *opaque, hwaddr addr,
2142 uint64_t value, unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002143{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002144 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002145 uint8_t buf[8];
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002146
blueswir1db7b5422007-05-26 17:36:03 +00002147#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002148 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002149 " value %"PRIx64"\n",
2150 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00002151#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002152 switch (len) {
2153 case 1:
2154 stb_p(buf, value);
2155 break;
2156 case 2:
2157 stw_p(buf, value);
2158 break;
2159 case 4:
2160 stl_p(buf, value);
2161 break;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002162 case 8:
2163 stq_p(buf, value);
2164 break;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002165 default:
2166 abort();
2167 }
Peter Maydell5c9eb022015-04-26 16:49:24 +01002168 return address_space_write(subpage->as, addr + subpage->base,
2169 attrs, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00002170}
2171
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002172static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08002173 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002174{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002175 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002176#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002177 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002178 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002179#endif
2180
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002181 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08002182 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002183}
2184
Avi Kivity70c68e42012-01-02 12:32:48 +02002185static const MemoryRegionOps subpage_ops = {
Peter Maydellf25a49e2015-04-26 16:49:24 +01002186 .read_with_attrs = subpage_read,
2187 .write_with_attrs = subpage_write,
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002188 .impl.min_access_size = 1,
2189 .impl.max_access_size = 8,
2190 .valid.min_access_size = 1,
2191 .valid.max_access_size = 8,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002192 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02002193 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00002194};
2195
Anthony Liguoric227f092009-10-01 16:12:16 -05002196static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002197 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00002198{
2199 int idx, eidx;
2200
2201 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2202 return -1;
2203 idx = SUBPAGE_IDX(start);
2204 eidx = SUBPAGE_IDX(end);
2205#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002206 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2207 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00002208#endif
blueswir1db7b5422007-05-26 17:36:03 +00002209 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02002210 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00002211 }
2212
2213 return 0;
2214}
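/*
 * Worked example (illustrative): registering bytes [0x100, 0x2ff] of a
 * 4 KiB subpage stores the section id in entries SUBPAGE_IDX(0x100)
 * through SUBPAGE_IDX(0x2ff), so any access in that range resolves to
 * that MemoryRegionSection.
 */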
2215
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002216static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00002217{
Anthony Liguoric227f092009-10-01 16:12:16 -05002218 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00002219
Anthony Liguori7267c092011-08-20 22:09:37 -05002220 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00002221
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002222 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00002223 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002224 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002225 NULL, TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02002226 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00002227#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002228 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2229 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00002230#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02002231 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00002232
2233 return mmio;
2234}
2235
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002236static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2237 MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02002238{
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002239 assert(as);
Avi Kivity5312bd82012-02-12 18:32:55 +02002240 MemoryRegionSection section = {
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002241 .address_space = as,
Avi Kivity5312bd82012-02-12 18:32:55 +02002242 .mr = mr,
2243 .offset_within_address_space = 0,
2244 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002245 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02002246 };
2247
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002248 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02002249}
2250
Peter Maydella54c87b2016-01-21 14:15:05 +00002251MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
Avi Kivityaa102232012-03-08 17:06:55 +02002252{
Peter Maydella54c87b2016-01-21 14:15:05 +00002253 int asidx = cpu_asidx_from_attrs(cpu, attrs);
2254 CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
Peter Maydell32857f42015-10-01 15:29:50 +01002255 AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002256 MemoryRegionSection *sections = d->map.sections;
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002257
2258 return sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02002259}
2260
Avi Kivitye9179ce2009-06-14 11:38:52 +03002261static void io_mem_init(void)
2262{
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002263 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002264 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002265 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002266 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002267 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002268 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002269 NULL, UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03002270}
2271
Avi Kivityac1970f2012-10-03 16:22:53 +02002272static void mem_begin(MemoryListener *listener)
2273{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002274 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002275 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2276 uint16_t n;
2277
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002278 n = dummy_section(&d->map, as, &io_mem_unassigned);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002279 assert(n == PHYS_SECTION_UNASSIGNED);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002280 n = dummy_section(&d->map, as, &io_mem_notdirty);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002281 assert(n == PHYS_SECTION_NOTDIRTY);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002282 n = dummy_section(&d->map, as, &io_mem_rom);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002283 assert(n == PHYS_SECTION_ROM);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002284 n = dummy_section(&d->map, as, &io_mem_watch);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002285 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02002286
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02002287 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02002288 d->as = as;
2289 as->next_dispatch = d;
2290}
2291
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002292static void address_space_dispatch_free(AddressSpaceDispatch *d)
2293{
2294 phys_sections_free(&d->map);
2295 g_free(d);
2296}
2297
Paolo Bonzini00752702013-05-29 12:13:54 +02002298static void mem_commit(MemoryListener *listener)
2299{
2300 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02002301 AddressSpaceDispatch *cur = as->dispatch;
2302 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02002303
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002304 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02002305
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002306 atomic_rcu_set(&as->dispatch, next);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002307 if (cur) {
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002308 call_rcu(cur, address_space_dispatch_free, rcu);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002309 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02002310}
2311
Avi Kivity1d711482012-10-02 18:54:45 +02002312static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02002313{
Peter Maydell32857f42015-10-01 15:29:50 +01002314 CPUAddressSpace *cpuas;
2315 AddressSpaceDispatch *d;
Avi Kivity117712c2012-02-12 21:23:17 +02002316
2317 /* since each CPU stores ram addresses in its TLB cache, we must
2318 reset the modified entries */
Peter Maydell32857f42015-10-01 15:29:50 +01002319 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
2320 cpu_reloading_memory_map();
2321 /* The CPU and TLB are protected by the iothread lock.
2322 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2323 * may have split the RCU critical section.
2324 */
2325 d = atomic_rcu_read(&cpuas->as->dispatch);
2326 cpuas->memory_dispatch = d;
2327 tlb_flush(cpuas->cpu, 1);
Avi Kivity50c1e142012-02-08 21:36:02 +02002328}
2329
Avi Kivityac1970f2012-10-03 16:22:53 +02002330void address_space_init_dispatch(AddressSpace *as)
2331{
Paolo Bonzini00752702013-05-29 12:13:54 +02002332 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002333 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002334 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02002335 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02002336 .region_add = mem_add,
2337 .region_nop = mem_add,
2338 .priority = 0,
2339 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002340 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02002341}
2342
Paolo Bonzini6e48e8f2015-02-10 10:25:44 -07002343void address_space_unregister(AddressSpace *as)
2344{
2345 memory_listener_unregister(&as->dispatch_listener);
2346}
2347
Avi Kivity83f3c252012-10-07 12:59:55 +02002348void address_space_destroy_dispatch(AddressSpace *as)
2349{
2350 AddressSpaceDispatch *d = as->dispatch;
2351
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002352 atomic_rcu_set(&as->dispatch, NULL);
2353 if (d) {
2354 call_rcu(d, address_space_dispatch_free, rcu);
2355 }
Avi Kivity83f3c252012-10-07 12:59:55 +02002356}
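
/* Editorial note, not part of the original file: the call_rcu() above is
 * safe because every reader accesses the dispatch table inside an RCU
 * critical section, so a freed table can never be in use.  A sketch of the
 * reader side that this pairs with (mirroring iotlb_to_region() and the
 * dispatch readers in this file):
 */
#if 0   /* illustrative sketch only, never compiled */
    AddressSpaceDispatch *d;

    rcu_read_lock();
    d = atomic_rcu_read(&as->dispatch);  /* old or new table, never freed */
    /* ... look up sections in d->map while inside the critical section ... */
    rcu_read_unlock();
#endif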

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
                          65536);
    address_space_init(&address_space_io, system_io, "I/O");
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID)) {
            return -1;
        }
        if (is_write) {
            if (!(flags & PAGE_WRITE)) {
                return -1;
            }
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0))) {
                return -1;
            }
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ)) {
                return -1;
            }
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1))) {
                return -1;
            }
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else

static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
                                     hwaddr length)
{
    uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    addr += memory_region_get_ram_addr(mr);

    /* No early return if dirty_log_mask is or becomes 0, because
     * cpu_physical_memory_set_dirty_range will still call
     * xen_modified_memory.
     */
    if (dirty_log_mask) {
        dirty_log_mask =
            cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
    }
    if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
        tb_invalidate_phys_range(addr, addr + length);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
    }
    cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
}

static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified. */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address. */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum. */
    if (l > access_size_max) {
        l = access_size_max;
    }
    l = pow2floor(l);

    return l;
}
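
/* Editorial example, not part of the original file: in two's complement,
 * "addr & -addr" isolates the lowest set bit of addr, i.e. the largest
 * power of two that divides it.  So an 8-byte request at address 0x1006
 * is clamped to a 2-byte access, while address 0x1000 (alignment 0x1000)
 * leaves access_size_max untouched.  A self-contained demonstration:
 */
#if 0   /* illustrative sketch only, never compiled */
#include <stdio.h>

int main(void)
{
    unsigned long addrs[] = { 0x1000, 0x1004, 0x1006, 0x1007 };
    int i;

    for (i = 0; i < 4; i++) {
        unsigned long a = addrs[i];
        /* a & -a == largest naturally aligned access size at a */
        printf("addr=%#lx largest aligned access=%#lx\n", a, a & -a);
    }
    return 0;
}
#endif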

static bool prepare_mmio_access(MemoryRegion *mr)
{
    bool unlocked = !qemu_mutex_iothread_locked();
    bool release_lock = false;

    if (unlocked && mr->global_locking) {
        qemu_mutex_lock_iothread();
        unlocked = false;
        release_lock = true;
    }
    if (mr->flush_coalesced_mmio) {
        if (unlocked) {
            qemu_mutex_lock_iothread();
        }
        qemu_flush_coalesced_mmio_buffer();
        if (unlocked) {
            qemu_mutex_unlock_iothread();
        }
    }

    return release_lock;
}
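
/* Editorial sketch, not part of the original file: the calling convention
 * used by every dispatch path below.  Take the iothread lock only when the
 * region requires it, remember whether we took it, and release it once the
 * access has been dispatched (mr and is_write are placeholders here):
 */
#if 0   /* illustrative sketch only, never compiled */
    bool release_lock = false;

    if (!memory_access_is_direct(mr, is_write)) {
        release_lock |= prepare_mmio_access(mr);
        /* ... memory_region_dispatch_read()/..._write() ... */
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
#endif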

/* Called within RCU critical section. */
static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
                                                MemTxAttrs attrs,
                                                const uint8_t *buf,
                                                int len, hwaddr addr1,
                                                hwaddr l, MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, true)) {
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            /* XXX: could force current_cpu to NULL to avoid
               potential bugs */
            switch (l) {
            case 8:
                /* 64 bit write access */
                val = ldq_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 8,
                                                       attrs);
                break;
            case 4:
                /* 32 bit write access */
                val = ldl_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 4,
                                                       attrs);
                break;
            case 2:
                /* 16 bit write access */
                val = lduw_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 2,
                                                       attrs);
                break;
            case 1:
                /* 8 bit write access */
                val = ldub_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 1,
                                                       attrs);
                break;
            default:
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(mr, addr1, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
    }

    return result;
}

MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                                const uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
        result = address_space_write_continue(as, addr, attrs, buf, len,
                                              addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}
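
/* Editorial example, not part of the original file: a hypothetical caller
 * zeroing four bytes of guest-physical memory and checking the transaction
 * result (the address 0x1000 is arbitrary):
 */
#if 0   /* illustrative sketch only, never compiled */
    uint8_t zeroes[4] = { 0 };
    MemTxResult res;

    res = address_space_write(&address_space_memory, 0x1000,
                              MEMTXATTRS_UNSPECIFIED, zeroes, sizeof(zeroes));
    if (res != MEMTX_OK) {
        /* the write hit an unassigned or error-reporting region */
    }
#endif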

/* Called within RCU critical section. */
MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
                                        MemTxAttrs attrs, uint8_t *buf,
                                        int len, hwaddr addr1, hwaddr l,
                                        MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, false)) {
            /* I/O case */
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            switch (l) {
            case 8:
                /* 64 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 8,
                                                      attrs);
                stq_p(buf, val);
                break;
            case 4:
                /* 32 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 4,
                                                      attrs);
                stl_p(buf, val);
                break;
            case 2:
                /* 16 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 2,
                                                      attrs);
                stw_p(buf, val);
                break;
            case 1:
                /* 8 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 1,
                                                      attrs);
                stb_p(buf, val);
                break;
            default:
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
            memcpy(buf, ptr, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
    }

    return result;
}

MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs, uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
        result = address_space_read_continue(as, addr, attrs, buf, len,
                                             addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}

MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                             uint8_t *buf, int len, bool is_write)
{
    if (is_write) {
        return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
    } else {
        return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
    }
}

void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}
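
/* Editorial example, not part of the original file: the legacy helper above
 * is the convenience wrapper most board code uses; e.g. reading 16 bytes of
 * guest-physical memory (address and size are arbitrary here):
 */
#if 0   /* illustrative sketch only, never compiled */
    uint8_t buf[16];

    cpu_physical_memory_rw(0x8000, buf, sizeof(buf), 0 /* read */);
#endif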

enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            l = memory_access_size(mr, l, addr1);
        } else {
            /* ROM/RAM case */
            ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();
}

/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}
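
/* Editorial example, not part of the original file: a board model loading a
 * firmware image into a ROM region at reset might call (blob and blob_size
 * are hypothetical names):
 */
#if 0   /* illustrative sketch only, never compiled */
    cpu_physical_memory_write_rom(&address_space_memory, 0xfffc0000,
                                  blob, blob_size);
#endif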

void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }

    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}

typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
    bool in_use;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    QEMUBH *bh;
    QLIST_ENTRY(MapClient) link;
} MapClient;

QemuMutex map_client_list_lock;
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

static void cpu_unregister_map_client_do(MapClient *client)
{
    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients_locked(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        qemu_bh_schedule(client->bh);
        cpu_unregister_map_client_do(client);
    }
}

void cpu_register_map_client(QEMUBH *bh)
{
    MapClient *client = g_malloc(sizeof(*client));

    qemu_mutex_lock(&map_client_list_lock);
    client->bh = bh;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    if (!atomic_read(&bounce.in_use)) {
        cpu_notify_map_clients_locked();
    }
    qemu_mutex_unlock(&map_client_list_lock);
}
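
/* Editorial sketch, not part of the original file: how a DMA user pairs
 * cpu_register_map_client() with a failed address_space_map().  The bottom
 * half fires once the bounce buffer is released (my_retry_cb and my_state
 * are hypothetical names):
 */
#if 0   /* illustrative sketch only, never compiled */
    QEMUBH *bh = qemu_bh_new(my_retry_cb, my_state);
    hwaddr len = size;
    void *p = address_space_map(as, addr, &len, true);

    if (!p) {
        cpu_register_map_client(bh);   /* retry the map from my_retry_cb */
    }
#endif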

void cpu_exec_init_all(void)
{
    qemu_mutex_init(&ram_list.mutex);
    io_mem_init();
    memory_map_init();
    qemu_mutex_init(&map_client_list_lock);
}

void cpu_unregister_map_client(QEMUBH *bh)
{
    MapClient *client;

    qemu_mutex_lock(&map_client_list_lock);
    QLIST_FOREACH(client, &map_client_list, link) {
        if (client->bh == bh) {
            cpu_unregister_map_client_do(client);
            break;
        }
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

static void cpu_notify_map_clients(void)
{
    qemu_mutex_lock(&map_client_list_lock);
    cpu_notify_map_clients_locked();
    qemu_mutex_unlock(&map_client_list_lock);
}

bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                rcu_read_unlock();
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    rcu_read_unlock();
    return true;
}
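
/* Editorial example, not part of the original file: a device model can use
 * the predicate above to reject a DMA request up front instead of dispatching
 * a partial access (desc_addr and desc_len are hypothetical names):
 */
#if 0   /* illustrative sketch only, never compiled */
    if (!address_space_access_valid(&address_space_memory, desc_addr,
                                    desc_len, true /* is_write */)) {
        /* fail the request before touching memory */
    }
#endif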

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    void *ptr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &l, is_write);

    if (!memory_access_is_direct(mr, is_write)) {
        if (atomic_xchg(&bounce.in_use, true)) {
            rcu_read_unlock();
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                               bounce.buffer, l);
        }

        rcu_read_unlock();
        *plen = l;
        return bounce.buffer;
    }

    base = xlat;

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    *plen = done;
    ptr = qemu_ram_ptr_length(mr->ram_block, base, plen);
    rcu_read_unlock();

    return ptr;
}

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = memory_region_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}
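
/* Editorial sketch, not part of the original file: the canonical zero-copy
 * pattern built from the two functions above.  Note that *plen may come back
 * smaller than requested, and that access_len tells unmap how much memory to
 * mark dirty (gpa and size are hypothetical names):
 */
#if 0   /* illustrative sketch only, never compiled */
    hwaddr len = size;
    void *p = address_space_map(as, gpa, &len, true /* is_write */);

    if (p) {
        memset(p, 0, len);                       /* use at most len bytes */
        address_space_unmap(as, p, len, true, len);
    }
#endif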

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    address_space_unmap(&address_space_memory, buffer, len, is_write,
                        access_len);
}

/* warning: addr must be aligned */
static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}
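
/* Editorial example, not part of the original file: reading a 32-bit
 * little-endian value with an explicit transaction check, as a device model
 * parsing a guest descriptor might (desc_addr is a hypothetical name):
 */
#if 0   /* illustrative sketch only, never compiled */
    MemTxResult r;
    uint32_t v = address_space_ldl_le(&address_space_memory, desc_addr,
                                      MEMTXATTRS_UNSPECIFIED, &r);

    if (r != MEMTX_OK) {
        /* the load faulted; v is not meaningful */
    }
#endif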

/* warning: addr must be aligned */
static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* XXX: optimize */
uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &val, 1, 0);
    if (result) {
        *result = r;
    }
    return val;
}

uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline uint32_t address_space_lduw_internal(AddressSpace *as,
                                                   hwaddr addr,
                                                   MemTxAttrs attrs,
                                                   MemTxResult *result,
                                                   enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_BIG_ENDIAN);
}

uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned.  The RAM page is not marked as dirty
   and the code inside is not invalidated.  This is useful when the dirty
   bits are used to track modified PTEs. */
void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
                                MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    uint8_t dirty_log_mask;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        stl_p(ptr, val);

        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
                                            4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
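
/* Editorial example, not part of the original file: the notdirty store is
 * meant for target MMU helpers that update page-table entries in guest RAM
 * without flagging the page as modified code.  A hypothetical sketch (cs,
 * pte_addr and PTE_ACCESSED are made-up names):
 */
#if 0   /* illustrative sketch only, never compiled */
    uint32_t pte = ldl_phys(cs->as, pte_addr);

    stl_phys_notdirty(cs->as, pte_addr, pte | PTE_ACCESSED);
#endif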
3324
bellard8df1cd02005-01-28 22:37:22 +00003325/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003326static inline void address_space_stl_internal(AddressSpace *as,
3327 hwaddr addr, uint32_t val,
3328 MemTxAttrs attrs,
3329 MemTxResult *result,
3330 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003331{
bellard8df1cd02005-01-28 22:37:22 +00003332 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003333 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003334 hwaddr l = 4;
3335 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003336 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003337 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003338
Paolo Bonzini41063e12015-03-18 14:21:43 +01003339 rcu_read_lock();
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003340 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003341 true);
3342 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003343 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003344
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003345#if defined(TARGET_WORDS_BIGENDIAN)
3346 if (endian == DEVICE_LITTLE_ENDIAN) {
3347 val = bswap32(val);
3348 }
3349#else
3350 if (endian == DEVICE_BIG_ENDIAN) {
3351 val = bswap32(val);
3352 }
3353#endif
Peter Maydell50013112015-04-26 16:49:24 +01003354 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003355 } else {
bellard8df1cd02005-01-28 22:37:22 +00003356 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003357 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003358 switch (endian) {
3359 case DEVICE_LITTLE_ENDIAN:
3360 stl_le_p(ptr, val);
3361 break;
3362 case DEVICE_BIG_ENDIAN:
3363 stl_be_p(ptr, val);
3364 break;
3365 default:
3366 stl_p(ptr, val);
3367 break;
3368 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003369 invalidate_and_set_dirty(mr, addr1, 4);
Peter Maydell50013112015-04-26 16:49:24 +01003370 r = MEMTX_OK;
bellard8df1cd02005-01-28 22:37:22 +00003371 }
Peter Maydell50013112015-04-26 16:49:24 +01003372 if (result) {
3373 *result = r;
3374 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003375 if (release_lock) {
3376 qemu_mutex_unlock_iothread();
3377 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003378 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003379}
3380
3381void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3382 MemTxAttrs attrs, MemTxResult *result)
3383{
3384 address_space_stl_internal(as, addr, val, attrs, result,
3385 DEVICE_NATIVE_ENDIAN);
3386}
3387
3388void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3389 MemTxAttrs attrs, MemTxResult *result)
3390{
3391 address_space_stl_internal(as, addr, val, attrs, result,
3392 DEVICE_LITTLE_ENDIAN);
3393}
3394
3395void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3396 MemTxAttrs attrs, MemTxResult *result)
3397{
3398 address_space_stl_internal(as, addr, val, attrs, result,
3399 DEVICE_BIG_ENDIAN);
bellard8df1cd02005-01-28 22:37:22 +00003400}
3401
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003402void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003403{
Peter Maydell50013112015-04-26 16:49:24 +01003404 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003405}
3406
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003407void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003408{
Peter Maydell50013112015-04-26 16:49:24 +01003409 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003410}
3411
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003412void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003413{
Peter Maydell50013112015-04-26 16:49:24 +01003414 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003415}
3416
bellardaab33092005-10-30 20:48:42 +00003417/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003418void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3419 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003420{
3421 uint8_t v = val;
Peter Maydell50013112015-04-26 16:49:24 +01003422 MemTxResult r;
3423
3424 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3425 if (result) {
3426 *result = r;
3427 }
3428}
3429
3430void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3431{
3432 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003433}
3434
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003435/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003436static inline void address_space_stw_internal(AddressSpace *as,
3437 hwaddr addr, uint32_t val,
3438 MemTxAttrs attrs,
3439 MemTxResult *result,
3440 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003441{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003442 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003443 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003444 hwaddr l = 2;
3445 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003446 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003447 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003448
Paolo Bonzini41063e12015-03-18 14:21:43 +01003449 rcu_read_lock();
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003450 mr = address_space_translate(as, addr, &addr1, &l, true);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003451 if (l < 2 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003452 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003453
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003454#if defined(TARGET_WORDS_BIGENDIAN)
3455 if (endian == DEVICE_LITTLE_ENDIAN) {
3456 val = bswap16(val);
3457 }
3458#else
3459 if (endian == DEVICE_BIG_ENDIAN) {
3460 val = bswap16(val);
3461 }
3462#endif
Peter Maydell50013112015-04-26 16:49:24 +01003463 r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003464 } else {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003465 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003466 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003467 switch (endian) {
3468 case DEVICE_LITTLE_ENDIAN:
3469 stw_le_p(ptr, val);
3470 break;
3471 case DEVICE_BIG_ENDIAN:
3472 stw_be_p(ptr, val);
3473 break;
3474 default:
3475 stw_p(ptr, val);
3476 break;
3477 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003478 invalidate_and_set_dirty(mr, addr1, 2);
Peter Maydell50013112015-04-26 16:49:24 +01003479 r = MEMTX_OK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003480 }
Peter Maydell50013112015-04-26 16:49:24 +01003481 if (result) {
3482 *result = r;
3483 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003484 if (release_lock) {
3485 qemu_mutex_unlock_iothread();
3486 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003487 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003488}
3489
3490void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
3491 MemTxAttrs attrs, MemTxResult *result)
3492{
3493 address_space_stw_internal(as, addr, val, attrs, result,
3494 DEVICE_NATIVE_ENDIAN);
3495}
3496
3497void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
3498 MemTxAttrs attrs, MemTxResult *result)
3499{
3500 address_space_stw_internal(as, addr, val, attrs, result,
3501 DEVICE_LITTLE_ENDIAN);
3502}
3503
3504void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
3505 MemTxAttrs attrs, MemTxResult *result)
3506{
3507 address_space_stw_internal(as, addr, val, attrs, result,
3508 DEVICE_BIG_ENDIAN);
bellardaab33092005-10-30 20:48:42 +00003509}
3510
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003511void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003512{
Peter Maydell50013112015-04-26 16:49:24 +01003513 address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003514}
3515
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003516void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003517{
Peter Maydell50013112015-04-26 16:49:24 +01003518 address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003519}
3520
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003521void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003522{
Peter Maydell50013112015-04-26 16:49:24 +01003523 address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003524}
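
/*
 * Illustrative sketch, not part of QEMU: storing a 16-bit value in a
 * fixed byte order regardless of host or target endianness.  A device
 * with little-endian registers should be written with the _le variant,
 * which the _internal helper above byte-swaps as needed.  The function
 * name and register are invented; marked unused, example only.
 */
static G_GNUC_UNUSED void example_store_word_le(AddressSpace *as, hwaddr reg)
{
    MemTxResult res;

    address_space_stw_le(as, reg, 0x1234, MEMTXATTRS_UNSPECIFIED, &res);
    if (res != MEMTX_OK) {
        error_report("16-bit store to 0x%" HWADDR_PRIx " failed", reg);
    }
}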
3525
bellardaab33092005-10-30 20:48:42 +00003526/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003527void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
3528 MemTxAttrs attrs, MemTxResult *result)
3529{
3530 MemTxResult r;
3531 val = tswap64(val);
3532 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3533 if (result) {
3534 *result = r;
3535 }
3536}
3537
3538void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
3539 MemTxAttrs attrs, MemTxResult *result)
3540{
3541 MemTxResult r;
3542 val = cpu_to_le64(val);
3543 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3544 if (result) {
3545 *result = r;
3546 }
3547}
3548void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
3549 MemTxAttrs attrs, MemTxResult *result)
3550{
3551 MemTxResult r;
3552 val = cpu_to_be64(val);
3553 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3554 if (result) {
3555 *result = r;
3556 }
3557}
3558
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003559void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00003560{
Peter Maydell50013112015-04-26 16:49:24 +01003561 address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003562}
3563
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003564void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003565{
Peter Maydell50013112015-04-26 16:49:24 +01003566 address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003567}
3568
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003569void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003570{
Peter Maydell50013112015-04-26 16:49:24 +01003571 address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003572}
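
/*
 * Illustrative sketch, not part of QEMU: the stq helpers take a
 * host-order uint64_t and byte-swap the whole value before issuing a
 * bytewise address_space_rw, so callers just pass plain values.  The
 * function name and "descriptor" are invented; marked unused, example
 * only.
 */
static G_GNUC_UNUSED void example_store_quad(AddressSpace *as,
                                             hwaddr desc_addr, uint64_t desc)
{
    MemTxResult res;

    /* Write a hypothetical 64-bit descriptor in big-endian layout. */
    address_space_stq_be(as, desc_addr, desc, MEMTXATTRS_UNSPECIFIED, &res);
    if (res != MEMTX_OK) {
        error_report("descriptor write at 0x%" HWADDR_PRIx " failed",
                     desc_addr);
    }
}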
3573
aliguori5e2972f2009-03-28 17:51:36 +00003574/* virtual memory access for debug (includes writing to ROM) */
Andreas Färberf17ec442013-06-29 19:40:58 +02003575int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00003576 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003577{
3578 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02003579 hwaddr phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00003580 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00003581
3582 while (len > 0) {
Peter Maydell5232e4c2016-01-21 14:15:06 +00003583 int asidx;
3584 MemTxAttrs attrs;
3585
bellard13eb76e2004-01-24 15:23:36 +00003586 page = addr & TARGET_PAGE_MASK;
Peter Maydell5232e4c2016-01-21 14:15:06 +00003587 phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
3588 asidx = cpu_asidx_from_attrs(cpu, attrs);
bellard13eb76e2004-01-24 15:23:36 +00003589 /* if no physical page mapped, return an error */
3590 if (phys_addr == -1)
3591 return -1;
3592 l = (page + TARGET_PAGE_SIZE) - addr;
3593 if (l > len)
3594 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00003595 phys_addr += (addr & ~TARGET_PAGE_MASK);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003596 if (is_write) {
Peter Maydell5232e4c2016-01-21 14:15:06 +00003597 cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
3598 phys_addr, buf, l);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003599 } else {
Peter Maydell5232e4c2016-01-21 14:15:06 +00003600 address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
3601 MEMTXATTRS_UNSPECIFIED,
Peter Maydell5c9eb022015-04-26 16:49:24 +01003602 buf, l, 0);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003603 }
bellard13eb76e2004-01-24 15:23:36 +00003604 len -= l;
3605 buf += l;
3606 addr += l;
3607 }
3608 return 0;
3609}
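
/*
 * Illustrative sketch, not part of QEMU: reading guest memory through
 * the debug path, the way a gdb stub or monitor command would.  The
 * function name is invented; 'cpu' is any valid CPUState.  Marked
 * unused, example only.
 */
static G_GNUC_UNUSED int example_debug_peek(CPUState *cpu, target_ulong vaddr,
                                            uint32_t *out)
{
    uint8_t buf[4];

    /* is_write == 0: read; the helper walks the page tables per page. */
    if (cpu_memory_rw_debug(cpu, vaddr, buf, sizeof(buf), 0) < 0) {
        return -1;              /* no physical page mapped */
    }
    *out = ldl_p(buf);          /* interpret in target byte order */
    return 0;
}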

/*
 * Allows code that needs to deal with migration bitmaps etc to still be built
 * target independent.
 */
size_t qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}

#endif

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
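
/*
 * Illustrative sketch, not part of QEMU: one way device code could use
 * the helper above to decide whether to byte-swap, swapping only when
 * the target and the device disagree on byte order.  The function name
 * and logic are invented for the example; marked unused, example only.
 */
static G_GNUC_UNUSED bool example_needs_swap(bool device_is_little_endian)
{
    /*
     * target BE + device LE -> true == true  -> swap
     * target LE + device BE -> false == false -> swap
     * matching byte orders  -> no swap
     */
    return target_words_bigendian() == device_is_little_endian;
}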

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}
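
/*
 * Illustrative sketch, not part of QEMU: a caller such as a crash-dump
 * writer can use the predicate above to skip MMIO regions, which have
 * no backing RAM to copy.  The function name is invented; marked
 * unused, example only.
 */
static G_GNUC_UNUSED bool example_should_dump_page(hwaddr paddr)
{
    /* Only RAM and ROM-device regions contain dumpable contents. */
    return !cpu_physical_memory_is_io(paddr);
}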

int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);
        if (ret) {
            break;
        }
    }
    rcu_read_unlock();
    return ret;
}
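
/*
 * Illustrative sketch, not part of QEMU: a RAMBlockIterFunc that sums
 * the used length of every RAM block.  Returning non-zero from the
 * callback stops the iteration early; returning 0 continues it.  The
 * function names are invented; marked unused, example only.
 */
static G_GNUC_UNUSED int example_sum_block_cb(const char *block_name,
                                              void *host_addr,
                                              ram_addr_t offset,
                                              ram_addr_t length,
                                              void *opaque)
{
    *(uint64_t *)opaque += length;
    return 0;                   /* keep iterating */
}

static G_GNUC_UNUSED uint64_t example_total_ram(void)
{
    uint64_t total = 0;

    qemu_ram_foreach_block(example_sum_block_cb, &total);
    return total;
}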
#endif