/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#ifndef _WIN32
#endif

#include "qemu/cutils.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "hw/qdev-core.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#include "hw/xen/xen.h"
#endif
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "hw/hw.h"
#include "exec/memory.h"
#include "exec/ioport.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "exec/log.h"

#include "migration/vmstate.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
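/* Illustrative reader pattern (a sketch mirroring the rule above, not code
 * from this file):
 *
 *     rcu_read_lock();
 *     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
 *         ... inspect block ...
 *     }
 *     rcu_read_unlock();
 *
 * Writers must additionally take the ramlist lock.
 */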

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables. */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
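/* Worked example: with 4 KiB target pages (TARGET_PAGE_BITS == 12),
 * P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = 6 radix-tree levels.
 */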

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif

#if !defined(CONFIG_USER_ONLY)

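/* Ensure map->nodes has room for at least 'nodes' more entries, growing the
 * array geometrically and remembering the high-water mark in alloc_hint.
 */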
static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    static unsigned alloc_hint = 16;
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, alloc_hint);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
        alloc_hint = map->nodes_nb_alloc;
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

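/* Populate the radix tree: point the pages in [*index, *index + *nb) at
 * section number 'leaf', allocating intermediate nodes as the walk descends.
 */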
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

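/* Map 'nb' pages starting at page index 'index' to section number 'leaf'. */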
static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non-leaf page entry. Simply detect that the entry has a single
 * child, and update our entry so we can skip it and go directly to the
 * destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}
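/* Illustrative sketch (not from the original source): compaction collapses a
 * chain of single-child nodes, e.g.
 *
 *     root -(skip 1)-> A -(skip 1)-> leaf
 *
 * into root -(skip 2)-> leaf, so phys_page_find() can consume several radix
 * levels in one step via lp.skip.
 */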

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return section->size.hi ||
           range_covers_byte(section->offset_within_address_space,
                             section->size.lo, addr);
}
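/* For example, a section whose size is the full 2^64 address space has
 * size.hi == 1, so the first test alone reports a match for any addr.
 */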

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = atomic_read(&d->mru_section);
    subpage_t *subpage;
    bool update;

    if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
        section_covers_addr(section, addr)) {
        update = false;
    } else {
        section = phys_page_find(d->phys_map, addr, d->map.nodes,
                                 d->map.sections);
        update = true;
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    if (update) {
        atomic_set(&d->mru_section, section);
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions. When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly. For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine. If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}
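/* Illustrative caller pattern (a sketch, not code from this file): the
 * translation is only stable inside an RCU critical section, e.g.
 *
 *     rcu_read_lock();
 *     mr = address_space_translate(as, addr, &xlat, &len, is_write);
 *     if (memory_access_is_direct(mr, is_write)) {
 *         ... access guest RAM directly at xlat ...
 *     }
 *     rcu_read_unlock();
 */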

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

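/* The two descriptions above are wired up as subsections below; a subsection
 * is only put on the wire when its .needed callback returns true, which keeps
 * the stream compatible with destinations that do not know about the field.
 */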
const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
#endif

#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

static int cpu_get_free_index(Error **errp)
{
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
                   MAX_CPUMASK_BITS);
        return -1;
    }

    bitmap_set(cpu_index_map, cpu, 1);
    return cpu;
}

static void cpu_release_index(CPUState *cpu)
{
    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
}
#else

static int cpu_get_free_index(Error **errp)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

static void cpu_release_index(CPUState *cpu)
{
    return;
}
#endif

void cpu_exec_exit(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cpu_list_lock();
    if (cpu->node.tqe_prev == NULL) {
        /* there is nothing to undo since cpu_exec_init() hasn't been called */
        cpu_list_unlock();
        return;
    }

    QTAILQ_REMOVE(&cpus, cpu, node);
    cpu->node.tqe_prev = NULL;
    cpu_release_index(cpu);
    cpu->cpu_index = -1;
    cpu_list_unlock();

    if (cc->vmsd != NULL) {
        vmstate_unregister(NULL, cc->vmsd, cpu);
    }
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
    }
}

void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc ATTRIBUTE_UNUSED = CPU_GET_CLASS(cpu);
    Error *local_err = NULL;

    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();

    /* This is a softmmu CPU object, so create a property for it
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
                             (Object **)&cpu->memory,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));
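    /* Illustrative only (an assumption, not code from this file): a board
     * model that wants a CPU to see something other than the global system
     * memory can override the link before realize, e.g.
     *
     *     object_property_set_link(OBJECT(cpu), OBJECT(my_cpu_view),
     *                              "memory", &error_abort);
     *
     * where my_cpu_view is a hypothetical MemoryRegion set up by the board.
     */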
#endif

    cpu_list_lock();
    cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        cpu_list_unlock();
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
    cpu_list_unlock();

#ifndef CONFIG_USER_ONLY
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
    }
#endif
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    MemTxAttrs attrs;
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
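    /* Worked example: with a 64-bit vaddr, addr = 0xfffffffffffff000 and
     * len = 0x1000 make addr + len wrap to zero, but addr + len - 1 is still
     * 0xffffffffffffffff, so the comparison below remains correct.
     */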
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *     rcu_read_lock()
     *     read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *     rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    bool dirty = false;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
                                              offset, num);
        page += num;
    }

    rcu_read_unlock();

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this. Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}
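/* Illustrative only (an assumption, not code from this file): an accelerator
 * with special allocation needs could install its hook during early machine
 * init, e.g.
 *
 *     phys_mem_set_alloc(my_special_ram_alloc);
 *
 * where my_special_ram_alloc is a hypothetical function matching the
 * void *(size_t size, uint64_t *align) signature above.
 */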

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
                  & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}


static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
bellard33417e72003-08-10 21:47:01 +00001192{
Avi Kivitya8170e52012-10-23 12:30:10 +02001193 hwaddr start_addr = section->offset_within_address_space;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001194 uint16_t section_index = phys_section_add(&d->map, section);
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001195 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1196 TARGET_PAGE_BITS));
Avi Kivitydd811242012-01-02 12:17:03 +02001197
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001198 assert(num_pages);
1199 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
bellard33417e72003-08-10 21:47:01 +00001200}
1201
Avi Kivityac1970f2012-10-03 16:22:53 +02001202static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001203{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001204 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini00752702013-05-29 12:13:54 +02001205 AddressSpaceDispatch *d = as->next_dispatch;
Paolo Bonzini99b9cc02013-05-27 13:18:01 +02001206 MemoryRegionSection now = *section, remain = *section;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001207 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001208
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001209 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1210 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1211 - now.offset_within_address_space;
1212
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001213 now.size = int128_min(int128_make64(left), now.size);
Avi Kivityac1970f2012-10-03 16:22:53 +02001214 register_subpage(d, &now);
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001215 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001216 now.size = int128_zero();
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001217 }
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001218 while (int128_ne(remain.size, now.size)) {
1219 remain.size = int128_sub(remain.size, now.size);
1220 remain.offset_within_address_space += int128_get64(now.size);
1221 remain.offset_within_region += int128_get64(now.size);
Tyler Hall69b67642012-07-25 18:45:04 -04001222 now = remain;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001223 if (int128_lt(remain.size, page_size)) {
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001224 register_subpage(d, &now);
Hu Tao88266242013-08-29 18:21:16 +08001225 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001226 now.size = page_size;
Avi Kivityac1970f2012-10-03 16:22:53 +02001227 register_subpage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001228 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001229 now.size = int128_and(now.size, int128_neg(page_size));
Avi Kivityac1970f2012-10-03 16:22:53 +02001230 register_multipage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001231 }
Avi Kivity0f0cb162012-02-13 17:14:32 +02001232 }
1233}
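/* In short, mem_add() above splits each incoming MemoryRegionSection into
 * page-sized pieces: partial pages that have to share a page frame with
 * other sections are registered through subpages, while runs of whole,
 * page-aligned pages are registered in bulk via register_multipage().
 */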
1234
Sheng Yang62a27442010-01-26 19:21:16 +08001235void qemu_flush_coalesced_mmio_buffer(void)
1236{
1237 if (kvm_enabled())
1238 kvm_flush_coalesced_mmio_buffer();
1239}
1240
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001241void qemu_mutex_lock_ramlist(void)
1242{
1243 qemu_mutex_lock(&ram_list.mutex);
1244}
1245
1246void qemu_mutex_unlock_ramlist(void)
1247{
1248 qemu_mutex_unlock(&ram_list.mutex);
1249}
1250
Markus Armbrustere1e84ba2013-07-31 15:11:10 +02001251#ifdef __linux__
Alex Williamson04b16652010-07-02 11:13:17 -06001252static void *file_ram_alloc(RAMBlock *block,
1253 ram_addr_t memory,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001254 const char *path,
1255 Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001256{
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001257 bool unlink_on_error = false;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001258 char *filename;
Peter Feiner8ca761f2013-03-04 13:54:25 -05001259 char *sanitized_name;
1260 char *c;
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +03001261 void *area;
Paolo Bonzini5c3ece72016-03-17 15:53:13 +01001262 int fd = -1;
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001263 int64_t page_size;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001264
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001265 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1266 error_setg(errp,
1267 "host lacks kvm mmu notifiers, -mem-path unsupported");
1268 return NULL;
1269 }
1270
1271 for (;;) {
1272 fd = open(path, O_RDWR);
1273 if (fd >= 0) {
1274 /* @path names an existing file, use it */
1275 break;
1276 }
1277 if (errno == ENOENT) {
1278 /* @path names a file that doesn't exist, create it */
1279 fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
1280 if (fd >= 0) {
1281 unlink_on_error = true;
1282 break;
1283 }
1284 } else if (errno == EISDIR) {
1285 /* @path names a directory, create a file there */
1286 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1287 sanitized_name = g_strdup(memory_region_name(block->mr));
1288 for (c = sanitized_name; *c != '\0'; c++) {
1289 if (*c == '/') {
1290 *c = '_';
1291 }
1292 }
1293
1294 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1295 sanitized_name);
1296 g_free(sanitized_name);
1297
1298 fd = mkstemp(filename);
1299 if (fd >= 0) {
1300 unlink(filename);
1301 g_free(filename);
1302 break;
1303 }
1304 g_free(filename);
1305 }
1306 if (errno != EEXIST && errno != EINTR) {
1307 error_setg_errno(errp, errno,
1308 "can't open backing store %s for guest RAM",
1309 path);
1310 goto error;
1311 }
1312 /*
1313 * Try again on EINTR and EEXIST. The latter happens when
1314 * something else creates the file between our two open().
1315 */
1316 }
1317
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001318 page_size = qemu_fd_getpagesize(fd);
Dominik Dingeld2f39ad2016-04-25 13:55:38 +02001319 block->mr->align = MAX(page_size, QEMU_VMALLOC_ALIGN);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001320
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001321 if (memory < page_size) {
Hu Tao557529d2014-09-09 13:28:00 +08001322 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001323 "or larger than page size 0x%" PRIx64,
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001324 memory, page_size);
Hu Tao557529d2014-09-09 13:28:00 +08001325 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001326 }
1327
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001328 memory = ROUND_UP(memory, page_size);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001329
1330 /*
1331 * ftruncate is not supported by hugetlbfs in older
1332 * hosts, so don't bother bailing out on errors.
1333 * If anything goes wrong with it under other filesystems,
1334 * mmap will fail.
1335 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001336 if (ftruncate(fd, memory)) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001337 perror("ftruncate");
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001338 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001339
Dominik Dingeld2f39ad2016-04-25 13:55:38 +02001340 area = qemu_ram_mmap(fd, memory, block->mr->align,
1341 block->flags & RAM_SHARED);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001342 if (area == MAP_FAILED) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001343 error_setg_errno(errp, errno,
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001344 "unable to map backing store for guest RAM");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001345 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001346 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001347
1348 if (mem_prealloc) {
Paolo Bonzini38183312014-05-14 17:43:21 +08001349 os_mem_prealloc(fd, area, memory);
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001350 }
1351
Alex Williamson04b16652010-07-02 11:13:17 -06001352 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001353 return area;
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001354
1355error:
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001356 if (unlink_on_error) {
1357 unlink(path);
1358 }
Paolo Bonzini5c3ece72016-03-17 15:53:13 +01001359 if (fd != -1) {
1360 close(fd);
1361 }
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001362 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001363}
1364#endif
1365
Mike Day0dc3f442013-09-05 14:41:35 -04001366/* Called with the ramlist lock held. */
Alex Williamsond17b5282010-06-25 11:08:38 -06001367static ram_addr_t find_ram_offset(ram_addr_t size)
1368{
Alex Williamson04b16652010-07-02 11:13:17 -06001369 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001370 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001371
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001372 assert(size != 0); /* it would hand out same offset multiple times */
1373
Mike Day0dc3f442013-09-05 14:41:35 -04001374 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
Alex Williamson04b16652010-07-02 11:13:17 -06001375 return 0;
Mike Day0d53d9f2015-01-21 13:45:24 +01001376 }
Alex Williamson04b16652010-07-02 11:13:17 -06001377
Mike Day0dc3f442013-09-05 14:41:35 -04001378 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001379 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001380
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001381 end = block->offset + block->max_length;
Alex Williamson04b16652010-07-02 11:13:17 -06001382
Mike Day0dc3f442013-09-05 14:41:35 -04001383 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001384 if (next_block->offset >= end) {
1385 next = MIN(next, next_block->offset);
1386 }
1387 }
1388 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001389 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001390 mingap = next - end;
1391 }
1392 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001393
1394 if (offset == RAM_ADDR_MAX) {
1395 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1396 (uint64_t)size);
1397 abort();
1398 }
1399
Alex Williamson04b16652010-07-02 11:13:17 -06001400 return offset;
1401}
1402
Juan Quintela652d7ec2012-07-20 10:37:54 +02001403ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001404{
Alex Williamsond17b5282010-06-25 11:08:38 -06001405 RAMBlock *block;
1406 ram_addr_t last = 0;
1407
Mike Day0dc3f442013-09-05 14:41:35 -04001408 rcu_read_lock();
1409 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001410 last = MAX(last, block->offset + block->max_length);
Mike Day0d53d9f2015-01-21 13:45:24 +01001411 }
Mike Day0dc3f442013-09-05 14:41:35 -04001412 rcu_read_unlock();
Alex Williamsond17b5282010-06-25 11:08:38 -06001413 return last;
1414}
1415
Jason Baronddb97f12012-08-02 15:44:16 -04001416static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1417{
1418 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001419
1420 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
Marcel Apfelbaum47c8ca52015-02-04 17:43:54 +02001421 if (!machine_dump_guest_core(current_machine)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001422 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1423 if (ret) {
1424 perror("qemu_madvise");
1425 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1426 "but dump_guest_core=off specified\n");
1427 }
1428 }
1429}
1430
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001431const char *qemu_ram_get_idstr(RAMBlock *rb)
1432{
1433 return rb->idstr;
1434}
1435
Mike Dayae3a7042013-09-05 14:41:35 -04001436/* Called with iothread lock held. */
Gongleifa53a0e2016-05-10 10:04:59 +08001437void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
Hu Tao20cfe882014-04-02 15:13:26 +08001438{
Gongleifa53a0e2016-05-10 10:04:59 +08001439 RAMBlock *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001440
Avi Kivityc5705a72011-12-20 15:59:12 +02001441 assert(new_block);
1442 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001443
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001444 if (dev) {
1445 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001446 if (id) {
1447 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001448 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001449 }
1450 }
1451 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1452
Gongleiab0a9952016-05-10 10:05:00 +08001453 rcu_read_lock();
Mike Day0dc3f442013-09-05 14:41:35 -04001454 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Gongleifa53a0e2016-05-10 10:04:59 +08001455 if (block != new_block &&
1456 !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001457 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1458 new_block->idstr);
1459 abort();
1460 }
1461 }
Mike Day0dc3f442013-09-05 14:41:35 -04001462 rcu_read_unlock();
Avi Kivityc5705a72011-12-20 15:59:12 +02001463}
1464
Mike Dayae3a7042013-09-05 14:41:35 -04001465/* Called with iothread lock held. */
Gongleifa53a0e2016-05-10 10:04:59 +08001466void qemu_ram_unset_idstr(RAMBlock *block)
Hu Tao20cfe882014-04-02 15:13:26 +08001467{
Mike Dayae3a7042013-09-05 14:41:35 -04001468 /* FIXME: arch_init.c assumes that this is not called throughout
1469 * migration. Ignore the problem since hot-unplug during migration
1470 * does not work anyway.
1471 */
Hu Tao20cfe882014-04-02 15:13:26 +08001472 if (block) {
1473 memset(block->idstr, 0, sizeof(block->idstr));
1474 }
1475}
1476
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001477static int memory_try_enable_merging(void *addr, size_t len)
1478{
Marcel Apfelbaum75cc7f02015-02-04 17:43:55 +02001479 if (!machine_mem_merge(current_machine)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001480 /* disabled by the user */
1481 return 0;
1482 }
1483
1484 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1485}
1486
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001487/* Only legal before guest might have detected the memory size: e.g. on
1488 * incoming migration, or right after reset.
1489 *
1490 * As memory core doesn't know how is memory accessed, it is up to
1491 * resize callback to update device state and/or add assertions to detect
1492 * misuse, if necessary.
1493 */
Gongleifa53a0e2016-05-10 10:04:59 +08001494int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001495{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001496 assert(block);
1497
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001498 newsize = HOST_PAGE_ALIGN(newsize);
Michael S. Tsirkin129ddaf2015-02-17 10:15:30 +01001499
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001500 if (block->used_length == newsize) {
1501 return 0;
1502 }
1503
1504 if (!(block->flags & RAM_RESIZEABLE)) {
1505 error_setg_errno(errp, EINVAL,
1506 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1507 " in != 0x" RAM_ADDR_FMT, block->idstr,
1508 newsize, block->used_length);
1509 return -EINVAL;
1510 }
1511
1512 if (block->max_length < newsize) {
1513 error_setg_errno(errp, EINVAL,
1514 "Length too large: %s: 0x" RAM_ADDR_FMT
1515 " > 0x" RAM_ADDR_FMT, block->idstr,
1516 newsize, block->max_length);
1517 return -EINVAL;
1518 }
1519
1520 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1521 block->used_length = newsize;
Paolo Bonzini58d27072015-03-23 11:56:01 +01001522 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1523 DIRTY_CLIENTS_ALL);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001524 memory_region_set_size(block->mr, newsize);
1525 if (block->resized) {
1526 block->resized(block->idstr, newsize, block->host);
1527 }
1528 return 0;
1529}
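/* Usage sketch (hypothetical caller): resizing a block that was created
 * with qemu_ram_alloc_resizeable(), e.g. while applying incoming migration
 * state before the guest can have observed the size:
 *
 *     Error *err = NULL;
 *     if (qemu_ram_resize(block, new_size, &err) < 0) {
 *         error_report_err(err);
 *     }
 *
 * Blocks without RAM_RESIZEABLE, or sizes beyond max_length, fail with
 * -EINVAL.
 */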
1530
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001531/* Called with ram_list.mutex held */
1532static void dirty_memory_extend(ram_addr_t old_ram_size,
1533 ram_addr_t new_ram_size)
1534{
1535 ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
1536 DIRTY_MEMORY_BLOCK_SIZE);
1537 ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
1538 DIRTY_MEMORY_BLOCK_SIZE);
1539 int i;
1540
1541 /* Only need to extend if block count increased */
1542 if (new_num_blocks <= old_num_blocks) {
1543 return;
1544 }
1545
1546 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1547 DirtyMemoryBlocks *old_blocks;
1548 DirtyMemoryBlocks *new_blocks;
1549 int j;
1550
1551 old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
1552 new_blocks = g_malloc(sizeof(*new_blocks) +
1553 sizeof(new_blocks->blocks[0]) * new_num_blocks);
1554
1555 if (old_num_blocks) {
1556 memcpy(new_blocks->blocks, old_blocks->blocks,
1557 old_num_blocks * sizeof(old_blocks->blocks[0]));
1558 }
1559
1560 for (j = old_num_blocks; j < new_num_blocks; j++) {
1561 new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
1562 }
1563
1564 atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
1565
1566 if (old_blocks) {
1567 g_free_rcu(old_blocks, rcu);
1568 }
1569 }
1570}
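/* dirty_memory_extend() grows the dirty bitmaps RCU-style: it builds a new
 * DirtyMemoryBlocks array that reuses the existing bitmap blocks, allocates
 * fresh bitmaps only for the newly covered range, publishes the array with
 * atomic_rcu_set(), and frees the old array (not the shared bitmaps) after
 * a grace period.
 */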
1571
Fam Zheng528f46a2016-03-01 14:18:18 +08001572static void ram_block_add(RAMBlock *new_block, Error **errp)
Avi Kivityc5705a72011-12-20 15:59:12 +02001573{
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001574 RAMBlock *block;
Mike Day0d53d9f2015-01-21 13:45:24 +01001575 RAMBlock *last_block = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001576 ram_addr_t old_ram_size, new_ram_size;
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001577 Error *err = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001578
1579 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001580
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001581 qemu_mutex_lock_ramlist();
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001582 new_block->offset = find_ram_offset(new_block->max_length);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001583
1584 if (!new_block->host) {
1585 if (xen_enabled()) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001586 xen_ram_alloc(new_block->offset, new_block->max_length,
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001587 new_block->mr, &err);
1588 if (err) {
1589 error_propagate(errp, err);
1590 qemu_mutex_unlock_ramlist();
Paolo Bonzini39c350e2016-03-09 18:14:01 +01001591 return;
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001592 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001593 } else {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001594 new_block->host = phys_mem_alloc(new_block->max_length,
Igor Mammedova2b257d2014-10-31 16:38:37 +00001595 &new_block->mr->align);
Markus Armbruster39228252013-07-31 15:11:11 +02001596 if (!new_block->host) {
Hu Taoef701d72014-09-09 13:27:54 +08001597 error_setg_errno(errp, errno,
1598 "cannot set up guest memory '%s'",
1599 memory_region_name(new_block->mr));
1600 qemu_mutex_unlock_ramlist();
Paolo Bonzini39c350e2016-03-09 18:14:01 +01001601 return;
Markus Armbruster39228252013-07-31 15:11:11 +02001602 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001603 memory_try_enable_merging(new_block->host, new_block->max_length);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001604 }
1605 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001606
Li Zhijiandd631692015-07-02 20:18:06 +08001607 new_ram_size = MAX(old_ram_size,
1608 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1609 if (new_ram_size > old_ram_size) {
1610 migration_bitmap_extend(old_ram_size, new_ram_size);
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001611 dirty_memory_extend(old_ram_size, new_ram_size);
Li Zhijiandd631692015-07-02 20:18:06 +08001612 }
Mike Day0d53d9f2015-01-21 13:45:24 +01001613 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1614 * QLIST (which has an RCU-friendly variant) does not have insertion at
1615 * tail, so save the last element in last_block.
1616 */
Mike Day0dc3f442013-09-05 14:41:35 -04001617 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Mike Day0d53d9f2015-01-21 13:45:24 +01001618 last_block = block;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001619 if (block->max_length < new_block->max_length) {
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001620 break;
1621 }
1622 }
1623 if (block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001624 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001625 } else if (last_block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001626 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001627 } else { /* list is empty */
Mike Day0dc3f442013-09-05 14:41:35 -04001628 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001629 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001630 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001631
Mike Day0dc3f442013-09-05 14:41:35 -04001632 /* Write list before version */
1633 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001634 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001635 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001636
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001637 cpu_physical_memory_set_dirty_range(new_block->offset,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001638 new_block->used_length,
1639 DIRTY_CLIENTS_ALL);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001640
Paolo Bonzinia904c912015-01-21 16:18:35 +01001641 if (new_block->host) {
1642 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1643 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1644 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1645 if (kvm_enabled()) {
1646 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1647 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001648 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001649}
1650
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001651#ifdef __linux__
Fam Zheng528f46a2016-03-01 14:18:18 +08001652RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
1653 bool share, const char *mem_path,
1654 Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001655{
1656 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001657 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001658
1659 if (xen_enabled()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001660 error_setg(errp, "-mem-path not supported with Xen");
Fam Zheng528f46a2016-03-01 14:18:18 +08001661 return NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001662 }
1663
1664 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1665 /*
1666 * file_ram_alloc() needs to allocate just like
1667 * phys_mem_alloc, but we haven't bothered to provide
1668 * a hook there.
1669 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001670 error_setg(errp,
1671 "-mem-path not supported with this accelerator");
Fam Zheng528f46a2016-03-01 14:18:18 +08001672 return NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001673 }
1674
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001675 size = HOST_PAGE_ALIGN(size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001676 new_block = g_malloc0(sizeof(*new_block));
1677 new_block->mr = mr;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001678 new_block->used_length = size;
1679 new_block->max_length = size;
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001680 new_block->flags = share ? RAM_SHARED : 0;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001681 new_block->host = file_ram_alloc(new_block, size,
1682 mem_path, errp);
1683 if (!new_block->host) {
1684 g_free(new_block);
Fam Zheng528f46a2016-03-01 14:18:18 +08001685 return NULL;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001686 }
1687
Fam Zheng528f46a2016-03-01 14:18:18 +08001688 ram_block_add(new_block, &local_err);
Hu Taoef701d72014-09-09 13:27:54 +08001689 if (local_err) {
1690 g_free(new_block);
1691 error_propagate(errp, local_err);
Fam Zheng528f46a2016-03-01 14:18:18 +08001692 return NULL;
Hu Taoef701d72014-09-09 13:27:54 +08001693 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001694 return new_block;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001695}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001696#endif
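/* Usage sketch (Linux only, hypothetical caller): backing a memory region
 * with a file on a hugetlbfs mount, much like -mem-path does.  The mount
 * point below is only an example.
 *
 *     Error *err = NULL;
 *     RAMBlock *rb = qemu_ram_alloc_from_file(size, mr, false,
 *                                             "/dev/hugepages", &err);
 *     if (!rb) {
 *         error_propagate(errp, err);
 *     }
 *
 * Passing share=true sets RAM_SHARED and maps the file MAP_SHARED.
 */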
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001697
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001698static
Fam Zheng528f46a2016-03-01 14:18:18 +08001699RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1700 void (*resized)(const char*,
1701 uint64_t length,
1702 void *host),
1703 void *host, bool resizeable,
1704 MemoryRegion *mr, Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001705{
1706 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001707 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001708
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001709 size = HOST_PAGE_ALIGN(size);
1710 max_size = HOST_PAGE_ALIGN(max_size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001711 new_block = g_malloc0(sizeof(*new_block));
1712 new_block->mr = mr;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001713 new_block->resized = resized;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001714 new_block->used_length = size;
1715 new_block->max_length = max_size;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001716 assert(max_size >= size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001717 new_block->fd = -1;
1718 new_block->host = host;
1719 if (host) {
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001720 new_block->flags |= RAM_PREALLOC;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001721 }
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001722 if (resizeable) {
1723 new_block->flags |= RAM_RESIZEABLE;
1724 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001725 ram_block_add(new_block, &local_err);
Hu Taoef701d72014-09-09 13:27:54 +08001726 if (local_err) {
1727 g_free(new_block);
1728 error_propagate(errp, local_err);
Fam Zheng528f46a2016-03-01 14:18:18 +08001729 return NULL;
Hu Taoef701d72014-09-09 13:27:54 +08001730 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001731 return new_block;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001732}
1733
Fam Zheng528f46a2016-03-01 14:18:18 +08001734RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001735 MemoryRegion *mr, Error **errp)
1736{
1737 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1738}
1739
Fam Zheng528f46a2016-03-01 14:18:18 +08001740RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
pbrook94a6b542009-04-11 17:15:54 +00001741{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001742 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1743}
1744
Fam Zheng528f46a2016-03-01 14:18:18 +08001745RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001746 void (*resized)(const char*,
1747 uint64_t length,
1748 void *host),
1749 MemoryRegion *mr, Error **errp)
1750{
1751 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
pbrook94a6b542009-04-11 17:15:54 +00001752}
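/* The three public allocators above are thin wrappers around
 * qemu_ram_alloc_internal():
 *   - qemu_ram_alloc(): anonymous guest RAM of a fixed size
 *   - qemu_ram_alloc_from_ptr(): wraps caller-provided host memory and
 *     marks the block RAM_PREALLOC
 *   - qemu_ram_alloc_resizeable(): fixed max_length with a variable
 *     used_length (RAM_RESIZEABLE), grown or shrunk later with
 *     qemu_ram_resize()
 */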
bellarde9a1ab12007-02-08 23:08:38 +00001753
Paolo Bonzini43771532013-09-09 17:58:40 +02001754static void reclaim_ramblock(RAMBlock *block)
1755{
1756 if (block->flags & RAM_PREALLOC) {
1757 ;
1758 } else if (xen_enabled()) {
1759 xen_invalidate_map_cache_entry(block->host);
1760#ifndef _WIN32
1761 } else if (block->fd >= 0) {
Eduardo Habkost2f3a2bb2015-11-06 20:11:21 -02001762 qemu_ram_munmap(block->host, block->max_length);
Paolo Bonzini43771532013-09-09 17:58:40 +02001763 close(block->fd);
1764#endif
1765 } else {
1766 qemu_anon_ram_free(block->host, block->max_length);
1767 }
1768 g_free(block);
1769}
1770
Fam Zhengf1060c52016-03-01 14:18:22 +08001771void qemu_ram_free(RAMBlock *block)
bellarde9a1ab12007-02-08 23:08:38 +00001772{
Marc-André Lureau85bc2a12016-03-29 13:20:51 +02001773 if (!block) {
1774 return;
1775 }
1776
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001777 qemu_mutex_lock_ramlist();
Fam Zhengf1060c52016-03-01 14:18:22 +08001778 QLIST_REMOVE_RCU(block, next);
1779 ram_list.mru_block = NULL;
1780 /* Write list before version */
1781 smp_wmb();
1782 ram_list.version++;
1783 call_rcu(block, reclaim_ramblock, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001784 qemu_mutex_unlock_ramlist();
bellarde9a1ab12007-02-08 23:08:38 +00001785}
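/* Note that qemu_ram_free() only unlinks the block under the ramlist mutex;
 * the backing memory is released later by reclaim_ramblock(), once all RCU
 * readers that might still hold a pointer to the block have left their
 * critical sections.
 */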
1786
Huang Yingcd19cfa2011-03-02 08:56:19 +01001787#ifndef _WIN32
1788void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1789{
1790 RAMBlock *block;
1791 ram_addr_t offset;
1792 int flags;
1793 void *area, *vaddr;
1794
Mike Day0dc3f442013-09-05 14:41:35 -04001795 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001796 offset = addr - block->offset;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001797 if (offset < block->max_length) {
Michael S. Tsirkin1240be22014-11-12 11:44:41 +02001798 vaddr = ramblock_ptr(block, offset);
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001799 if (block->flags & RAM_PREALLOC) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001800 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001801 } else if (xen_enabled()) {
1802 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001803 } else {
1804 flags = MAP_FIXED;
Markus Armbruster3435f392013-07-31 15:11:07 +02001805 if (block->fd >= 0) {
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001806 flags |= (block->flags & RAM_SHARED ?
1807 MAP_SHARED : MAP_PRIVATE);
Markus Armbruster3435f392013-07-31 15:11:07 +02001808 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1809 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001810 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001811 /*
1812 * Remap needs to match alloc. Accelerators that
1813 * set phys_mem_alloc never remap. If they did,
1814 * we'd need a remap hook here.
1815 */
1816 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1817
Huang Yingcd19cfa2011-03-02 08:56:19 +01001818 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1819 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1820 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001821 }
1822 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001823 fprintf(stderr, "Could not remap addr: "
1824 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001825 length, addr);
1826 exit(1);
1827 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001828 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001829 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001830 }
Huang Yingcd19cfa2011-03-02 08:56:19 +01001831 }
1832 }
1833}
1834#endif /* !_WIN32 */
1835
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001836/* Return a host pointer to ram allocated with qemu_ram_alloc.
Mike Dayae3a7042013-09-05 14:41:35 -04001837 * This should not be used for general purpose DMA. Use address_space_map
1838 * or address_space_rw instead. For local memory (e.g. video ram) that the
1839 * device owns, use memory_region_get_ram_ptr.
Mike Day0dc3f442013-09-05 14:41:35 -04001840 *
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001841 * Called within RCU critical section.
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001842 */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001843void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001844{
Gonglei3655cb92016-02-20 10:35:20 +08001845 RAMBlock *block = ram_block;
1846
1847 if (block == NULL) {
1848 block = qemu_get_ram_block(addr);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001849 addr -= block->offset;
Gonglei3655cb92016-02-20 10:35:20 +08001850 }
Mike Dayae3a7042013-09-05 14:41:35 -04001851
1852 if (xen_enabled() && block->host == NULL) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001853 /* We need to check if the requested address is in the RAM
1854 * because we don't want to map the entire memory in QEMU.
1855 * In that case just map until the end of the page.
1856 */
1857 if (block->offset == 0) {
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001858 return xen_map_cache(addr, 0, 0);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001859 }
Mike Dayae3a7042013-09-05 14:41:35 -04001860
1861 block->host = xen_map_cache(block->offset, block->max_length, 1);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001862 }
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001863 return ramblock_ptr(block, addr);
pbrookdc828ca2009-04-09 22:21:07 +00001864}
1865
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001866/* Return a host pointer to guest's ram. Similar to qemu_map_ram_ptr
Mike Dayae3a7042013-09-05 14:41:35 -04001867 * but takes a size argument.
Mike Day0dc3f442013-09-05 14:41:35 -04001868 *
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001869 * Called within RCU critical section.
Mike Dayae3a7042013-09-05 14:41:35 -04001870 */
Gonglei3655cb92016-02-20 10:35:20 +08001871static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
1872 hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001873{
Gonglei3655cb92016-02-20 10:35:20 +08001874 RAMBlock *block = ram_block;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001875 if (*size == 0) {
1876 return NULL;
1877 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001878
Gonglei3655cb92016-02-20 10:35:20 +08001879 if (block == NULL) {
1880 block = qemu_get_ram_block(addr);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001881 addr -= block->offset;
Gonglei3655cb92016-02-20 10:35:20 +08001882 }
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001883 *size = MIN(*size, block->max_length - addr);
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001884
1885 if (xen_enabled() && block->host == NULL) {
1886 /* We need to check if the requested address is in the RAM
1887 * because we don't want to map the entire memory in QEMU.
1888 * In that case just map the requested area.
1889 */
1890 if (block->offset == 0) {
1891 return xen_map_cache(addr, *size, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001892 }
1893
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001894 block->host = xen_map_cache(block->offset, block->max_length, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001895 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001896
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001897 return ramblock_ptr(block, addr);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001898}
1899
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001900/*
1901 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1902 * in that RAMBlock.
1903 *
1904 * ptr: Host pointer to look up
1905 * round_offset: If true round the result offset down to a page boundary
1906 * *offset: set to result offset within the RAMBlock
1908 *
1909 * Returns: RAMBlock (or NULL if not found)
Mike Dayae3a7042013-09-05 14:41:35 -04001910 *
1911 * By the time this function returns, the returned pointer is not protected
1912 * by RCU anymore. If the caller is not within an RCU critical section and
1913 * does not hold the iothread lock, it must have other means of protecting the
1914 * pointer, such as a reference to the region that includes the incoming
1915 * ram_addr_t.
1916 */
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001917RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001918 ram_addr_t *offset)
pbrook5579c7f2009-04-11 14:47:08 +00001919{
pbrook94a6b542009-04-11 17:15:54 +00001920 RAMBlock *block;
1921 uint8_t *host = ptr;
1922
Jan Kiszka868bb332011-06-21 22:59:09 +02001923 if (xen_enabled()) {
Paolo Bonzinif615f392016-05-26 10:07:50 +02001924 ram_addr_t ram_addr;
Mike Day0dc3f442013-09-05 14:41:35 -04001925 rcu_read_lock();
Paolo Bonzinif615f392016-05-26 10:07:50 +02001926 ram_addr = xen_ram_addr_from_mapcache(ptr);
1927 block = qemu_get_ram_block(ram_addr);
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001928 if (block) {
Anthony PERARDd6b6aec2016-06-09 16:56:17 +01001929 *offset = ram_addr - block->offset;
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001930 }
Mike Day0dc3f442013-09-05 14:41:35 -04001931 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001932 return block;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001933 }
1934
Mike Day0dc3f442013-09-05 14:41:35 -04001935 rcu_read_lock();
1936 block = atomic_rcu_read(&ram_list.mru_block);
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001937 if (block && block->host && host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001938 goto found;
1939 }
1940
Mike Day0dc3f442013-09-05 14:41:35 -04001941 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001942        /* This case happens when the block is not mapped. */
1943 if (block->host == NULL) {
1944 continue;
1945 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001946 if (host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001947 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001948 }
pbrook94a6b542009-04-11 17:15:54 +00001949 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001950
Mike Day0dc3f442013-09-05 14:41:35 -04001951 rcu_read_unlock();
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001952 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001953
1954found:
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001955 *offset = (host - block->host);
1956 if (round_offset) {
1957 *offset &= TARGET_PAGE_MASK;
1958 }
Mike Day0dc3f442013-09-05 14:41:35 -04001959 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001960 return block;
1961}
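/* Usage sketch (hypothetical caller): translating a host pointer that is
 * known to come from a guest RAM mapping back to its RAMBlock and offset:
 *
 *     ram_addr_t offset;
 *     RAMBlock *rb = qemu_ram_block_from_host(host_ptr, true, &offset);
 *     if (rb) {
 *         // offset is page aligned within rb because round_offset is true
 *     }
 */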
1962
Dr. David Alan Gilberte3dd7492015-11-05 18:10:33 +00001963/*
1964 * Finds the named RAMBlock
1965 *
1966 * name: The name of RAMBlock to find
1967 *
1968 * Returns: RAMBlock (or NULL if not found)
1969 */
1970RAMBlock *qemu_ram_block_by_name(const char *name)
1971{
1972 RAMBlock *block;
1973
1974 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1975 if (!strcmp(name, block->idstr)) {
1976 return block;
1977 }
1978 }
1979
1980 return NULL;
1981}
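/* Usage sketch (hypothetical caller, e.g. code validating a block name
 * received from migration):
 *
 *     RAMBlock *rb = qemu_ram_block_by_name(idstr);
 *     if (!rb) {
 *         error_report("unknown ramblock \"%s\"", idstr);
 *     }
 *
 * The lookup walks ram_list.blocks with QLIST_FOREACH_RCU, so the caller is
 * expected to be in an RCU critical section or otherwise keep the list
 * stable.
 */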
1982
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001983/* Some of the softmmu routines need to translate from a host pointer
1984 (typically a TLB entry) back to a ram offset. */
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01001985ram_addr_t qemu_ram_addr_from_host(void *ptr)
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001986{
1987 RAMBlock *block;
Paolo Bonzinif615f392016-05-26 10:07:50 +02001988 ram_addr_t offset;
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001989
Paolo Bonzinif615f392016-05-26 10:07:50 +02001990 block = qemu_ram_block_from_host(ptr, false, &offset);
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001991 if (!block) {
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01001992 return RAM_ADDR_INVALID;
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001993 }
1994
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01001995 return block->offset + offset;
Marcelo Tosattie8902612010-10-11 15:31:19 -03001996}
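/* Usage sketch (hypothetical caller): translating a host pointer from a TLB
 * entry back to a ram_addr_t, checking for failure:
 *
 *     ram_addr_t ram_addr = qemu_ram_addr_from_host(ptr);
 *     if (ram_addr == RAM_ADDR_INVALID) {
 *         // ptr does not point into guest RAM
 *     }
 */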
Alex Williamsonf471a172010-06-11 11:11:42 -06001997
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001998/* Called within RCU critical section. */
Avi Kivitya8170e52012-10-23 12:30:10 +02001999static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002000 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00002001{
Juan Quintela52159192013-10-08 12:44:04 +02002002 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002003 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00002004 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002005 switch (size) {
2006 case 1:
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002007 stb_p(qemu_map_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002008 break;
2009 case 2:
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002010 stw_p(qemu_map_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002011 break;
2012 case 4:
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002013 stl_p(qemu_map_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002014 break;
2015 default:
2016 abort();
2017 }
Paolo Bonzini58d27072015-03-23 11:56:01 +01002018 /* Set both VGA and migration bits for simplicity and to remove
2019 * the notdirty callback faster.
2020 */
2021 cpu_physical_memory_set_dirty_range(ram_addr, size,
2022 DIRTY_CLIENTS_NOCODE);
bellardf23db162005-08-21 19:12:28 +00002023 /* we remove the notdirty callback only if the code has been
2024 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02002025 if (!cpu_physical_memory_is_clean(ram_addr)) {
Peter Crosthwaitebcae01e2015-09-10 22:39:42 -07002026 tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
Andreas Färber4917cf42013-05-27 05:17:50 +02002027 }
bellard1ccde1c2004-02-06 19:46:14 +00002028}
2029
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002030static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
2031 unsigned size, bool is_write)
2032{
2033 return is_write;
2034}
2035
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002036static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002037 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002038 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002039 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00002040};
2041
pbrook0f459d12008-06-09 00:20:13 +00002042/* Generate a debug exception if a watchpoint has been hit. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002043static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
pbrook0f459d12008-06-09 00:20:13 +00002044{
Andreas Färber93afead2013-08-26 03:41:01 +02002045 CPUState *cpu = current_cpu;
Sergey Fedorov568496c2016-02-11 11:17:32 +00002046 CPUClass *cc = CPU_GET_CLASS(cpu);
Andreas Färber93afead2013-08-26 03:41:01 +02002047 CPUArchState *env = cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00002048 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00002049 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00002050 CPUWatchpoint *wp;
Emilio G. Cota89fee742016-04-07 13:19:22 -04002051 uint32_t cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00002052
Andreas Färberff4700b2013-08-26 18:23:18 +02002053 if (cpu->watchpoint_hit) {
aliguori06d55cc2008-11-18 20:24:06 +00002054 /* We re-entered the check after replacing the TB. Now raise
2055 * the debug interrupt so that it will trigger after the
2056 * current instruction. */
Andreas Färber93afead2013-08-26 03:41:01 +02002057 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00002058 return;
2059 }
Andreas Färber93afead2013-08-26 03:41:01 +02002060 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Andreas Färberff4700b2013-08-26 18:23:18 +02002061 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Peter Maydell05068c02014-09-12 14:06:48 +01002062 if (cpu_watchpoint_address_matches(wp, vaddr, len)
2063 && (wp->flags & flags)) {
Peter Maydell08225672014-09-12 14:06:48 +01002064 if (flags == BP_MEM_READ) {
2065 wp->flags |= BP_WATCHPOINT_HIT_READ;
2066 } else {
2067 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
2068 }
2069 wp->hitaddr = vaddr;
Peter Maydell66b9b432015-04-26 16:49:24 +01002070 wp->hitattrs = attrs;
Andreas Färberff4700b2013-08-26 18:23:18 +02002071 if (!cpu->watchpoint_hit) {
Sergey Fedorov568496c2016-02-11 11:17:32 +00002072 if (wp->flags & BP_CPU &&
2073 !cc->debug_check_watchpoint(cpu, wp)) {
2074 wp->flags &= ~BP_WATCHPOINT_HIT;
2075 continue;
2076 }
Andreas Färberff4700b2013-08-26 18:23:18 +02002077 cpu->watchpoint_hit = wp;
Andreas Färber239c51a2013-09-01 17:12:23 +02002078 tb_check_watchpoint(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002079 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
Andreas Färber27103422013-08-26 08:31:06 +02002080 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +02002081 cpu_loop_exit(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002082 } else {
2083 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
Andreas Färber648f0342013-09-01 17:43:17 +02002084 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
Peter Maydell6886b982016-05-17 15:18:04 +01002085 cpu_loop_exit_noexc(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002086 }
aliguori06d55cc2008-11-18 20:24:06 +00002087 }
aliguori6e140f22008-11-18 20:37:55 +00002088 } else {
2089 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00002090 }
2091 }
2092}
2093
pbrook6658ffb2007-03-16 23:58:11 +00002094/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2095 so these check for a hit then pass through to the normal out-of-line
2096 phys routines. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002097static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
2098 unsigned size, MemTxAttrs attrs)
pbrook6658ffb2007-03-16 23:58:11 +00002099{
Peter Maydell66b9b432015-04-26 16:49:24 +01002100 MemTxResult res;
2101 uint64_t data;
Peter Maydell79ed0412016-01-21 14:15:06 +00002102 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2103 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
pbrook6658ffb2007-03-16 23:58:11 +00002104
Peter Maydell66b9b432015-04-26 16:49:24 +01002105 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
Avi Kivity1ec9b902012-01-02 12:47:48 +02002106 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04002107 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002108 data = address_space_ldub(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002109 break;
2110 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002111 data = address_space_lduw(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002112 break;
2113 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002114 data = address_space_ldl(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002115 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02002116 default: abort();
2117 }
Peter Maydell66b9b432015-04-26 16:49:24 +01002118 *pdata = data;
2119 return res;
2120}
2121
2122static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
2123 uint64_t val, unsigned size,
2124 MemTxAttrs attrs)
2125{
2126 MemTxResult res;
Peter Maydell79ed0412016-01-21 14:15:06 +00002127 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2128 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
Peter Maydell66b9b432015-04-26 16:49:24 +01002129
2130 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
2131 switch (size) {
2132 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002133 address_space_stb(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002134 break;
2135 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002136 address_space_stw(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002137 break;
2138 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002139 address_space_stl(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002140 break;
2141 default: abort();
2142 }
2143 return res;
pbrook6658ffb2007-03-16 23:58:11 +00002144}
2145
Avi Kivity1ec9b902012-01-02 12:47:48 +02002146static const MemoryRegionOps watch_mem_ops = {
Peter Maydell66b9b432015-04-26 16:49:24 +01002147 .read_with_attrs = watch_mem_read,
2148 .write_with_attrs = watch_mem_write,
Avi Kivity1ec9b902012-01-02 12:47:48 +02002149 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00002150};
pbrook6658ffb2007-03-16 23:58:11 +00002151
Peter Maydellf25a49e2015-04-26 16:49:24 +01002152static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2153 unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002154{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002155 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002156 uint8_t buf[8];
Peter Maydell5c9eb022015-04-26 16:49:24 +01002157 MemTxResult res;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002158
blueswir1db7b5422007-05-26 17:36:03 +00002159#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002160 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002161 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00002162#endif
Peter Maydell5c9eb022015-04-26 16:49:24 +01002163 res = address_space_read(subpage->as, addr + subpage->base,
2164 attrs, buf, len);
2165 if (res) {
2166 return res;
Peter Maydellf25a49e2015-04-26 16:49:24 +01002167 }
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002168 switch (len) {
2169 case 1:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002170 *data = ldub_p(buf);
2171 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002172 case 2:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002173 *data = lduw_p(buf);
2174 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002175 case 4:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002176 *data = ldl_p(buf);
2177 return MEMTX_OK;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002178 case 8:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002179 *data = ldq_p(buf);
2180 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002181 default:
2182 abort();
2183 }
blueswir1db7b5422007-05-26 17:36:03 +00002184}
2185
Peter Maydellf25a49e2015-04-26 16:49:24 +01002186static MemTxResult subpage_write(void *opaque, hwaddr addr,
2187 uint64_t value, unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002188{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002189 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002190 uint8_t buf[8];
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002191
blueswir1db7b5422007-05-26 17:36:03 +00002192#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002193 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002194 " value %"PRIx64"\n",
2195 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00002196#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002197 switch (len) {
2198 case 1:
2199 stb_p(buf, value);
2200 break;
2201 case 2:
2202 stw_p(buf, value);
2203 break;
2204 case 4:
2205 stl_p(buf, value);
2206 break;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002207 case 8:
2208 stq_p(buf, value);
2209 break;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002210 default:
2211 abort();
2212 }
Peter Maydell5c9eb022015-04-26 16:49:24 +01002213 return address_space_write(subpage->as, addr + subpage->base,
2214 attrs, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00002215}
2216
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002217static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08002218 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002219{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002220 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002221#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002222 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002223 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002224#endif
2225
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002226 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08002227 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002228}
2229
Avi Kivity70c68e42012-01-02 12:32:48 +02002230static const MemoryRegionOps subpage_ops = {
Peter Maydellf25a49e2015-04-26 16:49:24 +01002231 .read_with_attrs = subpage_read,
2232 .write_with_attrs = subpage_write,
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002233 .impl.min_access_size = 1,
2234 .impl.max_access_size = 8,
2235 .valid.min_access_size = 1,
2236 .valid.max_access_size = 8,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002237 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02002238 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00002239};
2240
Anthony Liguoric227f092009-10-01 16:12:16 -05002241static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002242 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00002243{
2244 int idx, eidx;
2245
2246 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2247 return -1;
2248 idx = SUBPAGE_IDX(start);
2249 eidx = SUBPAGE_IDX(end);
2250#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002251 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2252 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00002253#endif
blueswir1db7b5422007-05-26 17:36:03 +00002254 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02002255 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00002256 }
2257
2258 return 0;
2259}
2260
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002261static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00002262{
Anthony Liguoric227f092009-10-01 16:12:16 -05002263 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00002264
Anthony Liguori7267c092011-08-20 22:09:37 -05002265 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00002266
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002267 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00002268 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002269 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002270 NULL, TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02002271 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00002272#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002273 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2274 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00002275#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02002276 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00002277
2278 return mmio;
2279}
2280
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002281static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2282 MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02002283{
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002284 assert(as);
Avi Kivity5312bd82012-02-12 18:32:55 +02002285 MemoryRegionSection section = {
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002286 .address_space = as,
Avi Kivity5312bd82012-02-12 18:32:55 +02002287 .mr = mr,
2288 .offset_within_address_space = 0,
2289 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002290 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02002291 };
2292
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002293 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02002294}
2295
Peter Maydella54c87b2016-01-21 14:15:05 +00002296MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
Avi Kivityaa102232012-03-08 17:06:55 +02002297{
Peter Maydella54c87b2016-01-21 14:15:05 +00002298 int asidx = cpu_asidx_from_attrs(cpu, attrs);
2299 CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
Peter Maydell32857f42015-10-01 15:29:50 +01002300 AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002301 MemoryRegionSection *sections = d->map.sections;
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002302
2303 return sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02002304}
2305
Avi Kivitye9179ce2009-06-14 11:38:52 +03002306static void io_mem_init(void)
2307{
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002308 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002309 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002310 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002311 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002312 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002313 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002314 NULL, UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03002315}
2316
Avi Kivityac1970f2012-10-03 16:22:53 +02002317static void mem_begin(MemoryListener *listener)
2318{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002319 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002320 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2321 uint16_t n;
2322
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002323 n = dummy_section(&d->map, as, &io_mem_unassigned);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002324 assert(n == PHYS_SECTION_UNASSIGNED);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002325 n = dummy_section(&d->map, as, &io_mem_notdirty);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002326 assert(n == PHYS_SECTION_NOTDIRTY);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002327 n = dummy_section(&d->map, as, &io_mem_rom);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002328 assert(n == PHYS_SECTION_ROM);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002329 n = dummy_section(&d->map, as, &io_mem_watch);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002330 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02002331
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02002332 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02002333 d->as = as;
2334 as->next_dispatch = d;
2335}
2336
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002337static void address_space_dispatch_free(AddressSpaceDispatch *d)
2338{
2339 phys_sections_free(&d->map);
2340 g_free(d);
2341}
2342
Paolo Bonzini00752702013-05-29 12:13:54 +02002343static void mem_commit(MemoryListener *listener)
2344{
2345 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02002346 AddressSpaceDispatch *cur = as->dispatch;
2347 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02002348
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002349 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02002350
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002351 atomic_rcu_set(&as->dispatch, next);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002352 if (cur) {
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002353 call_rcu(cur, address_space_dispatch_free, rcu);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002354 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02002355}
2356
Avi Kivity1d711482012-10-02 18:54:45 +02002357static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02002358{
Peter Maydell32857f42015-10-01 15:29:50 +01002359 CPUAddressSpace *cpuas;
2360 AddressSpaceDispatch *d;
Avi Kivity117712c2012-02-12 21:23:17 +02002361
2362 /* Since each CPU stores RAM addresses in its TLB cache, we must
2363 reset the modified entries. */
Peter Maydell32857f42015-10-01 15:29:50 +01002364 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
2365 cpu_reloading_memory_map();
2366 /* The CPU and TLB are protected by the iothread lock.
2367 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2368 * may have split the RCU critical section.
2369 */
2370 d = atomic_rcu_read(&cpuas->as->dispatch);
2371 cpuas->memory_dispatch = d;
2372 tlb_flush(cpuas->cpu, 1);
Avi Kivity50c1e142012-02-08 21:36:02 +02002373}
2374
Avi Kivityac1970f2012-10-03 16:22:53 +02002375void address_space_init_dispatch(AddressSpace *as)
2376{
Paolo Bonzini00752702013-05-29 12:13:54 +02002377 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002378 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002379 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02002380 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02002381 .region_add = mem_add,
2382 .region_nop = mem_add,
2383 .priority = 0,
2384 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002385 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02002386}
2387
Paolo Bonzini6e48e8f2015-02-10 10:25:44 -07002388void address_space_unregister(AddressSpace *as)
2389{
2390 memory_listener_unregister(&as->dispatch_listener);
2391}
2392
Avi Kivity83f3c252012-10-07 12:59:55 +02002393void address_space_destroy_dispatch(AddressSpace *as)
2394{
2395 AddressSpaceDispatch *d = as->dispatch;
2396
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002397 atomic_rcu_set(&as->dispatch, NULL);
2398 if (d) {
2399 call_rcu(d, address_space_dispatch_free, rcu);
2400 }
Avi Kivity83f3c252012-10-07 12:59:55 +02002401}
2402
Avi Kivity62152b82011-07-26 14:26:14 +03002403static void memory_map_init(void)
2404{
Anthony Liguori7267c092011-08-20 22:09:37 -05002405 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01002406
Paolo Bonzini57271d62013-11-07 17:14:37 +01002407 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002408 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03002409
Anthony Liguori7267c092011-08-20 22:09:37 -05002410 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02002411 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2412 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002413 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity62152b82011-07-26 14:26:14 +03002414}
2415
2416MemoryRegion *get_system_memory(void)
2417{
2418 return system_memory;
2419}
2420
Avi Kivity309cb472011-08-08 16:09:03 +03002421MemoryRegion *get_system_io(void)
2422{
2423 return system_io;
2424}
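/*
 * Illustrative sketch, not part of the original file: how board code
 * typically plugs guest RAM into the region tree returned by
 * get_system_memory().  The function and region names are hypothetical,
 * and error handling is reduced to error_fatal; real boards also
 * register the region for migration (vmstate_register_ram_global).
 */
static void example_wire_board_ram(ram_addr_t ram_size)
{
    MemoryRegion *sysmem = get_system_memory();
    MemoryRegion *ram = g_new(MemoryRegion, 1);

    /* Allocate host memory backing the guest's RAM ... */
    memory_region_init_ram(ram, NULL, "example.ram", ram_size, &error_fatal);
    /* ... and map it at guest physical address 0. */
    memory_region_add_subregion(sysmem, 0, ram);
}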
2425
pbrooke2eef172008-06-08 01:09:01 +00002426#endif /* !defined(CONFIG_USER_ONLY) */
2427
bellard13eb76e2004-01-24 15:23:36 +00002428/* physical memory access (slow version, mainly for debug) */
2429#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02002430int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00002431 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002432{
2433 int l, flags;
2434 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00002435 void * p;
bellard13eb76e2004-01-24 15:23:36 +00002436
2437 while (len > 0) {
2438 page = addr & TARGET_PAGE_MASK;
2439 l = (page + TARGET_PAGE_SIZE) - addr;
2440 if (l > len)
2441 l = len;
2442 flags = page_get_flags(page);
2443 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00002444 return -1;
bellard13eb76e2004-01-24 15:23:36 +00002445 if (is_write) {
2446 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00002447 return -1;
bellard579a97f2007-11-11 14:26:47 +00002448 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002449 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00002450 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002451 memcpy(p, buf, l);
2452 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00002453 } else {
2454 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00002455 return -1;
bellard579a97f2007-11-11 14:26:47 +00002456 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002457 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00002458 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002459 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00002460 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00002461 }
2462 len -= l;
2463 buf += l;
2464 addr += l;
2465 }
Paul Brooka68fe892010-03-01 00:08:59 +00002466 return 0;
bellard13eb76e2004-01-24 15:23:36 +00002467}
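/*
 * Usage sketch (hypothetical helper, not part of the original file): a
 * gdbstub-style read of guest memory in the user-only case.  It simply
 * forwards to cpu_memory_rw_debug(), which fails if any page in the
 * range is unmapped or lacks the required permission.
 */
static int example_debug_read(CPUState *cpu, target_ulong addr,
                              uint8_t *dest, int len)
{
    /* is_write == 0: copy guest memory into dest */
    return cpu_memory_rw_debug(cpu, addr, dest, len, 0);
}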
bellard8df1cd02005-01-28 22:37:22 +00002468
bellard13eb76e2004-01-24 15:23:36 +00002469#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002470
Paolo Bonzini845b6212015-03-23 11:45:53 +01002471static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002472 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002473{
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002474 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002475 addr += memory_region_get_ram_addr(mr);
2476
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002477 /* No early return if dirty_log_mask is or becomes 0, because
2478 * cpu_physical_memory_set_dirty_range will still call
2479 * xen_modified_memory.
2480 */
2481 if (dirty_log_mask) {
2482 dirty_log_mask =
2483 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002484 }
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002485 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2486 tb_invalidate_phys_range(addr, addr + length);
2487 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2488 }
2489 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002490}
2491
Richard Henderson23326162013-07-08 14:55:59 -07002492static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02002493{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02002494 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07002495
2496 /* Regions are assumed to support 1-4 byte accesses unless
2497 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07002498 if (access_size_max == 0) {
2499 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002500 }
Richard Henderson23326162013-07-08 14:55:59 -07002501
2502 /* Bound the maximum access by the alignment of the address. */
2503 if (!mr->ops->impl.unaligned) {
2504 unsigned align_size_max = addr & -addr;
2505 if (align_size_max != 0 && align_size_max < access_size_max) {
2506 access_size_max = align_size_max;
2507 }
2508 }
2509
2510 /* Don't attempt accesses larger than the maximum. */
2511 if (l > access_size_max) {
2512 l = access_size_max;
2513 }
Peter Maydell6554f5c2015-07-24 13:33:10 +01002514 l = pow2floor(l);
Richard Henderson23326162013-07-08 14:55:59 -07002515
2516 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002517}
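/*
 * Worked example (illustrative): for a region whose valid.max_access_size
 * is 8 and which does not allow unaligned accesses, a 7-byte transfer
 * starting at addr = 0x1002 is clamped as follows: the alignment bound is
 * addr & -addr = 2, so access_size_max becomes 2, l = min(7, 2) = 2, and
 * pow2floor(2) = 2.  The calling loop issues a 2-byte access and comes
 * back here for the remaining 5 bytes.
 */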
2518
Jan Kiszka4840f102015-06-18 18:47:22 +02002519static bool prepare_mmio_access(MemoryRegion *mr)
Paolo Bonzini125b3802015-06-18 18:47:21 +02002520{
Jan Kiszka4840f102015-06-18 18:47:22 +02002521 bool unlocked = !qemu_mutex_iothread_locked();
2522 bool release_lock = false;
2523
2524 if (unlocked && mr->global_locking) {
2525 qemu_mutex_lock_iothread();
2526 unlocked = false;
2527 release_lock = true;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002528 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002529 if (mr->flush_coalesced_mmio) {
2530 if (unlocked) {
2531 qemu_mutex_lock_iothread();
2532 }
2533 qemu_flush_coalesced_mmio_buffer();
2534 if (unlocked) {
2535 qemu_mutex_unlock_iothread();
2536 }
2537 }
2538
2539 return release_lock;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002540}
2541
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002542/* Called within RCU critical section. */
2543static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
2544 MemTxAttrs attrs,
2545 const uint8_t *buf,
2546 int len, hwaddr addr1,
2547 hwaddr l, MemoryRegion *mr)
bellard13eb76e2004-01-24 15:23:36 +00002548{
bellard13eb76e2004-01-24 15:23:36 +00002549 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002550 uint64_t val;
Peter Maydell3b643492015-04-26 16:49:23 +01002551 MemTxResult result = MEMTX_OK;
Jan Kiszka4840f102015-06-18 18:47:22 +02002552 bool release_lock = false;
ths3b46e622007-09-17 08:09:54 +00002553
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002554 for (;;) {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002555 if (!memory_access_is_direct(mr, true)) {
2556 release_lock |= prepare_mmio_access(mr);
2557 l = memory_access_size(mr, l, addr1);
2558 /* XXX: could force current_cpu to NULL to avoid
2559 potential bugs */
2560 switch (l) {
2561 case 8:
2562 /* 64 bit write access */
2563 val = ldq_p(buf);
2564 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2565 attrs);
2566 break;
2567 case 4:
2568 /* 32 bit write access */
2569 val = ldl_p(buf);
2570 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2571 attrs);
2572 break;
2573 case 2:
2574 /* 16 bit write access */
2575 val = lduw_p(buf);
2576 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2577 attrs);
2578 break;
2579 case 1:
2580 /* 8 bit write access */
2581 val = ldub_p(buf);
2582 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2583 attrs);
2584 break;
2585 default:
2586 abort();
bellard13eb76e2004-01-24 15:23:36 +00002587 }
2588 } else {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002589 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002590 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002591 memcpy(ptr, buf, l);
2592 invalidate_and_set_dirty(mr, addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002593 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002594
2595 if (release_lock) {
2596 qemu_mutex_unlock_iothread();
2597 release_lock = false;
2598 }
2599
bellard13eb76e2004-01-24 15:23:36 +00002600 len -= l;
2601 buf += l;
2602 addr += l;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002603
2604 if (!len) {
2605 break;
2606 }
2607
2608 l = len;
2609 mr = address_space_translate(as, addr, &addr1, &l, true);
bellard13eb76e2004-01-24 15:23:36 +00002610 }
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002611
Peter Maydell3b643492015-04-26 16:49:23 +01002612 return result;
bellard13eb76e2004-01-24 15:23:36 +00002613}
bellard8df1cd02005-01-28 22:37:22 +00002614
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002615MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2616 const uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002617{
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002618 hwaddr l;
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002619 hwaddr addr1;
2620 MemoryRegion *mr;
2621 MemTxResult result = MEMTX_OK;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002622
2623 if (len > 0) {
2624 rcu_read_lock();
2625 l = len;
2626 mr = address_space_translate(as, addr, &addr1, &l, true);
2627 result = address_space_write_continue(as, addr, attrs, buf, len,
2628 addr1, l, mr);
2629 rcu_read_unlock();
2630 }
2631
2632 return result;
2633}
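/*
 * Usage sketch (hypothetical helper, not part of the original file):
 * writing a small buffer into guest physical memory and checking the
 * MemTxResult instead of silently ignoring transaction failures.  Reads
 * are symmetric via address_space_read().
 */
static bool example_write_guest_blob(AddressSpace *as, hwaddr addr,
                                     const uint8_t *blob, int size)
{
    MemTxResult res = address_space_write(as, addr, MEMTXATTRS_UNSPECIFIED,
                                          blob, size);
    return res == MEMTX_OK;
}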
2634
2635/* Called within RCU critical section. */
2636MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
2637 MemTxAttrs attrs, uint8_t *buf,
2638 int len, hwaddr addr1, hwaddr l,
2639 MemoryRegion *mr)
2640{
2641 uint8_t *ptr;
2642 uint64_t val;
2643 MemTxResult result = MEMTX_OK;
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002644 bool release_lock = false;
2645
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002646 for (;;) {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002647 if (!memory_access_is_direct(mr, false)) {
2648 /* I/O case */
2649 release_lock |= prepare_mmio_access(mr);
2650 l = memory_access_size(mr, l, addr1);
2651 switch (l) {
2652 case 8:
2653 /* 64 bit read access */
2654 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2655 attrs);
2656 stq_p(buf, val);
2657 break;
2658 case 4:
2659 /* 32 bit read access */
2660 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2661 attrs);
2662 stl_p(buf, val);
2663 break;
2664 case 2:
2665 /* 16 bit read access */
2666 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2667 attrs);
2668 stw_p(buf, val);
2669 break;
2670 case 1:
2671 /* 8 bit read access */
2672 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2673 attrs);
2674 stb_p(buf, val);
2675 break;
2676 default:
2677 abort();
2678 }
2679 } else {
2680 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002681 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002682 memcpy(buf, ptr, l);
2683 }
2684
2685 if (release_lock) {
2686 qemu_mutex_unlock_iothread();
2687 release_lock = false;
2688 }
2689
2690 len -= l;
2691 buf += l;
2692 addr += l;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002693
2694 if (!len) {
2695 break;
2696 }
2697
2698 l = len;
2699 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002700 }
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002701
2702 return result;
2703}
2704
Paolo Bonzini3cc8f882015-12-09 10:34:13 +01002705MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
2706 MemTxAttrs attrs, uint8_t *buf, int len)
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002707{
2708 hwaddr l;
2709 hwaddr addr1;
2710 MemoryRegion *mr;
2711 MemTxResult result = MEMTX_OK;
2712
2713 if (len > 0) {
2714 rcu_read_lock();
2715 l = len;
2716 mr = address_space_translate(as, addr, &addr1, &l, false);
2717 result = address_space_read_continue(as, addr, attrs, buf, len,
2718 addr1, l, mr);
2719 rcu_read_unlock();
2720 }
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002721
2722 return result;
Avi Kivityac1970f2012-10-03 16:22:53 +02002723}
2724
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002725MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2726 uint8_t *buf, int len, bool is_write)
2727{
2728 if (is_write) {
2729 return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
2730 } else {
2731 return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
2732 }
2733}
Avi Kivityac1970f2012-10-03 16:22:53 +02002734
Avi Kivitya8170e52012-10-23 12:30:10 +02002735void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002736 int len, int is_write)
2737{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002738 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2739 buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002740}
2741
Alexander Graf582b55a2013-12-11 14:17:44 +01002742enum write_rom_type {
2743 WRITE_DATA,
2744 FLUSH_CACHE,
2745};
2746
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002747static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
Alexander Graf582b55a2013-12-11 14:17:44 +01002748 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
bellardd0ecd2a2006-04-23 17:14:48 +00002749{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002750 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002751 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002752 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002753 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002754
Paolo Bonzini41063e12015-03-18 14:21:43 +01002755 rcu_read_lock();
bellardd0ecd2a2006-04-23 17:14:48 +00002756 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002757 l = len;
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002758 mr = address_space_translate(as, addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002759
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002760 if (!(memory_region_is_ram(mr) ||
2761 memory_region_is_romd(mr))) {
Paolo Bonzinib242e0e2015-07-04 00:24:51 +02002762 l = memory_access_size(mr, l, addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002763 } else {
bellardd0ecd2a2006-04-23 17:14:48 +00002764 /* ROM/RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002765 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf582b55a2013-12-11 14:17:44 +01002766 switch (type) {
2767 case WRITE_DATA:
2768 memcpy(ptr, buf, l);
Paolo Bonzini845b6212015-03-23 11:45:53 +01002769 invalidate_and_set_dirty(mr, addr1, l);
Alexander Graf582b55a2013-12-11 14:17:44 +01002770 break;
2771 case FLUSH_CACHE:
2772 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2773 break;
2774 }
bellardd0ecd2a2006-04-23 17:14:48 +00002775 }
2776 len -= l;
2777 buf += l;
2778 addr += l;
2779 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002780 rcu_read_unlock();
bellardd0ecd2a2006-04-23 17:14:48 +00002781}
2782
Alexander Graf582b55a2013-12-11 14:17:44 +01002783/* Used for ROM loading: can write to both RAM and ROM. */
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002784void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
Alexander Graf582b55a2013-12-11 14:17:44 +01002785 const uint8_t *buf, int len)
2786{
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002787 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
Alexander Graf582b55a2013-12-11 14:17:44 +01002788}
2789
2790void cpu_flush_icache_range(hwaddr start, int len)
2791{
2792 /*
2793 * This function should do the same thing as an icache flush that was
2794 * triggered from within the guest. For TCG we are always cache coherent,
2795 * so there is no need to flush anything. For KVM / Xen we need to flush
2796 * the host's instruction cache at least.
2797 */
2798 if (tcg_enabled()) {
2799 return;
2800 }
2801
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002802 cpu_physical_memory_write_rom_internal(&address_space_memory,
2803 start, NULL, len, FLUSH_CACHE);
Alexander Graf582b55a2013-12-11 14:17:44 +01002804}
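/*
 * Usage sketch (illustrative, not part of the original file): loading a
 * firmware image the way ROM loaders do.  The write bypasses the
 * read-only property of ROM regions; the explicit icache flush matters
 * for KVM/Xen, while under TCG it is a no-op (see above).
 */
static void example_load_firmware(AddressSpace *as, hwaddr addr,
                                  const uint8_t *image, int size)
{
    /* Write the image even if the target region is ROM. */
    cpu_physical_memory_write_rom(as, addr, image, size);
    /* Make the freshly written code visible to the host icache. */
    cpu_flush_icache_range(addr, size);
}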
2805
aliguori6d16c2f2009-01-22 16:59:11 +00002806typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002807 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002808 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002809 hwaddr addr;
2810 hwaddr len;
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002811 bool in_use;
aliguori6d16c2f2009-01-22 16:59:11 +00002812} BounceBuffer;
2813
2814static BounceBuffer bounce;
2815
aliguoriba223c22009-01-22 16:59:16 +00002816typedef struct MapClient {
Fam Zhenge95205e2015-03-16 17:03:37 +08002817 QEMUBH *bh;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002818 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002819} MapClient;
2820
Fam Zheng38e047b2015-03-16 17:03:35 +08002821QemuMutex map_client_list_lock;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002822static QLIST_HEAD(map_client_list, MapClient) map_client_list
2823 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002824
Fam Zhenge95205e2015-03-16 17:03:37 +08002825static void cpu_unregister_map_client_do(MapClient *client)
aliguoriba223c22009-01-22 16:59:16 +00002826{
Blue Swirl72cf2d42009-09-12 07:36:22 +00002827 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002828 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002829}
2830
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002831static void cpu_notify_map_clients_locked(void)
aliguoriba223c22009-01-22 16:59:16 +00002832{
2833 MapClient *client;
2834
Blue Swirl72cf2d42009-09-12 07:36:22 +00002835 while (!QLIST_EMPTY(&map_client_list)) {
2836 client = QLIST_FIRST(&map_client_list);
Fam Zhenge95205e2015-03-16 17:03:37 +08002837 qemu_bh_schedule(client->bh);
2838 cpu_unregister_map_client_do(client);
aliguoriba223c22009-01-22 16:59:16 +00002839 }
2840}
2841
Fam Zhenge95205e2015-03-16 17:03:37 +08002842void cpu_register_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002843{
2844 MapClient *client = g_malloc(sizeof(*client));
2845
Fam Zheng38e047b2015-03-16 17:03:35 +08002846 qemu_mutex_lock(&map_client_list_lock);
Fam Zhenge95205e2015-03-16 17:03:37 +08002847 client->bh = bh;
bellardd0ecd2a2006-04-23 17:14:48 +00002848 QLIST_INSERT_HEAD(&map_client_list, client, link);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002849 if (!atomic_read(&bounce.in_use)) {
2850 cpu_notify_map_clients_locked();
2851 }
Fam Zheng38e047b2015-03-16 17:03:35 +08002852 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002853}
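/*
 * Usage sketch (hypothetical, mirroring dma-helpers-style callers): when
 * address_space_map() fails because the single bounce buffer is busy,
 * queue a bottom half so the transfer is retried once
 * cpu_notify_map_clients() runs.  A real caller would keep the QEMUBH
 * around and delete it when the retry succeeds.
 */
static void example_dma_retry_cb(void *opaque)
{
    /* The client was already removed from the list when this BH was
     * scheduled (see cpu_notify_map_clients_locked above), so just
     * retry the mapping here. */
}

static void example_queue_dma_retry(void)
{
    QEMUBH *bh = qemu_bh_new(example_dma_retry_cb, NULL);

    cpu_register_map_client(bh);
}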
2854
Fam Zheng38e047b2015-03-16 17:03:35 +08002855void cpu_exec_init_all(void)
2856{
2857 qemu_mutex_init(&ram_list.mutex);
Fam Zheng38e047b2015-03-16 17:03:35 +08002858 io_mem_init();
Paolo Bonzini680a4782015-11-02 09:23:52 +01002859 memory_map_init();
Fam Zheng38e047b2015-03-16 17:03:35 +08002860 qemu_mutex_init(&map_client_list_lock);
2861}
2862
Fam Zhenge95205e2015-03-16 17:03:37 +08002863void cpu_unregister_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002864{
Fam Zhenge95205e2015-03-16 17:03:37 +08002865 MapClient *client;
bellardd0ecd2a2006-04-23 17:14:48 +00002866
Fam Zhenge95205e2015-03-16 17:03:37 +08002867 qemu_mutex_lock(&map_client_list_lock);
2868 QLIST_FOREACH(client, &map_client_list, link) {
2869 if (client->bh == bh) {
2870 cpu_unregister_map_client_do(client);
2871 break;
2872 }
2873 }
2874 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002875}
2876
2877static void cpu_notify_map_clients(void)
2878{
Fam Zheng38e047b2015-03-16 17:03:35 +08002879 qemu_mutex_lock(&map_client_list_lock);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002880 cpu_notify_map_clients_locked();
Fam Zheng38e047b2015-03-16 17:03:35 +08002881 qemu_mutex_unlock(&map_client_list_lock);
aliguori6d16c2f2009-01-22 16:59:11 +00002882}
2883
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002884bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2885{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002886 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002887 hwaddr l, xlat;
2888
Paolo Bonzini41063e12015-03-18 14:21:43 +01002889 rcu_read_lock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002890 while (len > 0) {
2891 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002892 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2893 if (!memory_access_is_direct(mr, is_write)) {
2894 l = memory_access_size(mr, l, addr);
2895 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002896 return false;
2897 }
2898 }
2899
2900 len -= l;
2901 addr += l;
2902 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002903 rcu_read_unlock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002904 return true;
2905}
2906
aliguori6d16c2f2009-01-22 16:59:11 +00002907/* Map a physical memory region into a host virtual address.
2908 * May map a subset of the requested range, given by and returned in *plen.
2909 * May return NULL if resources needed to perform the mapping are exhausted.
2910 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002911 * Use cpu_register_map_client() to know when retrying the map operation is
2912 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002913 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002914void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002915 hwaddr addr,
2916 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002917 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002918{
Avi Kivitya8170e52012-10-23 12:30:10 +02002919 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002920 hwaddr done = 0;
2921 hwaddr l, xlat, base;
2922 MemoryRegion *mr, *this_mr;
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01002923 void *ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00002924
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002925 if (len == 0) {
2926 return NULL;
2927 }
aliguori6d16c2f2009-01-22 16:59:11 +00002928
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002929 l = len;
Paolo Bonzini41063e12015-03-18 14:21:43 +01002930 rcu_read_lock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002931 mr = address_space_translate(as, addr, &xlat, &l, is_write);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002932
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002933 if (!memory_access_is_direct(mr, is_write)) {
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002934 if (atomic_xchg(&bounce.in_use, true)) {
Paolo Bonzini41063e12015-03-18 14:21:43 +01002935 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002936 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002937 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002938 /* Avoid unbounded allocations */
2939 l = MIN(l, TARGET_PAGE_SIZE);
2940 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002941 bounce.addr = addr;
2942 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002943
2944 memory_region_ref(mr);
2945 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002946 if (!is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002947 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2948 bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002949 }
aliguori6d16c2f2009-01-22 16:59:11 +00002950
Paolo Bonzini41063e12015-03-18 14:21:43 +01002951 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002952 *plen = l;
2953 return bounce.buffer;
2954 }
2955
2956 base = xlat;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002957
2958 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00002959 len -= l;
2960 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002961 done += l;
2962 if (len == 0) {
2963 break;
2964 }
2965
2966 l = len;
2967 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2968 if (this_mr != mr || xlat != base + done) {
2969 break;
2970 }
aliguori6d16c2f2009-01-22 16:59:11 +00002971 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002972
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002973 memory_region_ref(mr);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002974 *plen = done;
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002975 ptr = qemu_ram_ptr_length(mr->ram_block, base, plen);
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01002976 rcu_read_unlock();
2977
2978 return ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00002979}
2980
Avi Kivityac1970f2012-10-03 16:22:53 +02002981/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002982 * Will also mark the memory as dirty if is_write == 1. access_len gives
2983 * the amount of memory that was actually read or written by the caller.
2984 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002985void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2986 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002987{
2988 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002989 MemoryRegion *mr;
2990 ram_addr_t addr1;
2991
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01002992 mr = memory_region_from_host(buffer, &addr1);
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002993 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00002994 if (is_write) {
Paolo Bonzini845b6212015-03-23 11:45:53 +01002995 invalidate_and_set_dirty(mr, addr1, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002996 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002997 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002998 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002999 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02003000 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00003001 return;
3002 }
3003 if (is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01003004 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
3005 bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00003006 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00003007 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003008 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02003009 memory_region_unref(bounce.mr);
Fam Zhengc2cba0f2015-03-16 17:03:33 +08003010 atomic_mb_set(&bounce.in_use, false);
aliguoriba223c22009-01-22 16:59:16 +00003011 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00003012}
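/*
 * Usage sketch (hypothetical, not part of the original file): the
 * map -> access -> unmap pattern for zero-copy DMA.  When the mapping
 * comes back shorter than requested, or NULL because the bounce buffer
 * is in use, a real caller would loop over the remainder or fall back to
 * address_space_rw()/cpu_register_map_client().
 */
static bool example_dma_fill(AddressSpace *as, hwaddr addr, hwaddr size,
                             uint8_t pattern)
{
    hwaddr plen = size;
    void *host = address_space_map(as, addr, &plen, true /* is_write */);

    if (!host || plen < size) {
        if (host) {
            address_space_unmap(as, host, plen, true, 0);
        }
        return false;
    }
    memset(host, pattern, size);
    /* access_len == size: mark exactly what we touched as dirty */
    address_space_unmap(as, host, plen, true, size);
    return true;
}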
bellardd0ecd2a2006-04-23 17:14:48 +00003013
Avi Kivitya8170e52012-10-23 12:30:10 +02003014void *cpu_physical_memory_map(hwaddr addr,
3015 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02003016 int is_write)
3017{
3018 return address_space_map(&address_space_memory, addr, plen, is_write);
3019}
3020
Avi Kivitya8170e52012-10-23 12:30:10 +02003021void cpu_physical_memory_unmap(void *buffer, hwaddr len,
3022 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02003023{
3024 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
3025}
3026
bellard8df1cd02005-01-28 22:37:22 +00003027/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003028static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
3029 MemTxAttrs attrs,
3030 MemTxResult *result,
3031 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003032{
bellard8df1cd02005-01-28 22:37:22 +00003033 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02003034 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003035 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003036 hwaddr l = 4;
3037 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003038 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003039 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003040
Paolo Bonzini41063e12015-03-18 14:21:43 +01003041 rcu_read_lock();
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003042 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003043 if (l < 4 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003044 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003045
bellard8df1cd02005-01-28 22:37:22 +00003046 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003047 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003048#if defined(TARGET_WORDS_BIGENDIAN)
3049 if (endian == DEVICE_LITTLE_ENDIAN) {
3050 val = bswap32(val);
3051 }
3052#else
3053 if (endian == DEVICE_BIG_ENDIAN) {
3054 val = bswap32(val);
3055 }
3056#endif
bellard8df1cd02005-01-28 22:37:22 +00003057 } else {
3058 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003059 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003060 switch (endian) {
3061 case DEVICE_LITTLE_ENDIAN:
3062 val = ldl_le_p(ptr);
3063 break;
3064 case DEVICE_BIG_ENDIAN:
3065 val = ldl_be_p(ptr);
3066 break;
3067 default:
3068 val = ldl_p(ptr);
3069 break;
3070 }
Peter Maydell50013112015-04-26 16:49:24 +01003071 r = MEMTX_OK;
3072 }
3073 if (result) {
3074 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003075 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003076 if (release_lock) {
3077 qemu_mutex_unlock_iothread();
3078 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003079 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003080 return val;
3081}
3082
Peter Maydell50013112015-04-26 16:49:24 +01003083uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
3084 MemTxAttrs attrs, MemTxResult *result)
3085{
3086 return address_space_ldl_internal(as, addr, attrs, result,
3087 DEVICE_NATIVE_ENDIAN);
3088}
3089
3090uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
3091 MemTxAttrs attrs, MemTxResult *result)
3092{
3093 return address_space_ldl_internal(as, addr, attrs, result,
3094 DEVICE_LITTLE_ENDIAN);
3095}
3096
3097uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
3098 MemTxAttrs attrs, MemTxResult *result)
3099{
3100 return address_space_ldl_internal(as, addr, attrs, result,
3101 DEVICE_BIG_ENDIAN);
3102}
3103
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003104uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003105{
Peter Maydell50013112015-04-26 16:49:24 +01003106 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003107}
3108
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003109uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003110{
Peter Maydell50013112015-04-26 16:49:24 +01003111 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003112}
3113
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003114uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003115{
Peter Maydell50013112015-04-26 16:49:24 +01003116 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003117}
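/*
 * Usage sketch (illustrative, hypothetical names): reading a 32-bit
 * little-endian descriptor field from guest memory.  The _le/_be
 * variants return the value in host byte order regardless of the
 * target's endianness; checking the MemTxResult distinguishes a valid
 * value from a failed transaction.
 */
static bool example_read_desc_word(AddressSpace *as, hwaddr addr,
                                   uint32_t *out)
{
    MemTxResult res;
    uint32_t v = address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, &res);

    if (res != MEMTX_OK) {
        return false;
    }
    *out = v;
    return true;
}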
3118
bellard84b7b8e2005-11-28 21:19:04 +00003119/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003120static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
3121 MemTxAttrs attrs,
3122 MemTxResult *result,
3123 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00003124{
bellard84b7b8e2005-11-28 21:19:04 +00003125 uint8_t *ptr;
3126 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003127 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003128 hwaddr l = 8;
3129 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003130 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003131 bool release_lock = false;
bellard84b7b8e2005-11-28 21:19:04 +00003132
Paolo Bonzini41063e12015-03-18 14:21:43 +01003133 rcu_read_lock();
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003134 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003135 false);
3136 if (l < 8 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003137 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003138
bellard84b7b8e2005-11-28 21:19:04 +00003139 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003140 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
Paolo Bonzini968a5622013-05-24 17:58:37 +02003141#if defined(TARGET_WORDS_BIGENDIAN)
3142 if (endian == DEVICE_LITTLE_ENDIAN) {
3143 val = bswap64(val);
3144 }
3145#else
3146 if (endian == DEVICE_BIG_ENDIAN) {
3147 val = bswap64(val);
3148 }
3149#endif
bellard84b7b8e2005-11-28 21:19:04 +00003150 } else {
3151 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003152 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003153 switch (endian) {
3154 case DEVICE_LITTLE_ENDIAN:
3155 val = ldq_le_p(ptr);
3156 break;
3157 case DEVICE_BIG_ENDIAN:
3158 val = ldq_be_p(ptr);
3159 break;
3160 default:
3161 val = ldq_p(ptr);
3162 break;
3163 }
Peter Maydell50013112015-04-26 16:49:24 +01003164 r = MEMTX_OK;
3165 }
3166 if (result) {
3167 *result = r;
bellard84b7b8e2005-11-28 21:19:04 +00003168 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003169 if (release_lock) {
3170 qemu_mutex_unlock_iothread();
3171 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003172 rcu_read_unlock();
bellard84b7b8e2005-11-28 21:19:04 +00003173 return val;
3174}
3175
Peter Maydell50013112015-04-26 16:49:24 +01003176uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
3177 MemTxAttrs attrs, MemTxResult *result)
3178{
3179 return address_space_ldq_internal(as, addr, attrs, result,
3180 DEVICE_NATIVE_ENDIAN);
3181}
3182
3183uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
3184 MemTxAttrs attrs, MemTxResult *result)
3185{
3186 return address_space_ldq_internal(as, addr, attrs, result,
3187 DEVICE_LITTLE_ENDIAN);
3188}
3189
3190uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
3191 MemTxAttrs attrs, MemTxResult *result)
3192{
3193 return address_space_ldq_internal(as, addr, attrs, result,
3194 DEVICE_BIG_ENDIAN);
3195}
3196
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003197uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003198{
Peter Maydell50013112015-04-26 16:49:24 +01003199 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003200}
3201
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003202uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003203{
Peter Maydell50013112015-04-26 16:49:24 +01003204 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003205}
3206
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003207uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003208{
Peter Maydell50013112015-04-26 16:49:24 +01003209 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003210}
3211
bellardaab33092005-10-30 20:48:42 +00003212/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003213uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
3214 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003215{
3216 uint8_t val;
Peter Maydell50013112015-04-26 16:49:24 +01003217 MemTxResult r;
3218
3219 r = address_space_rw(as, addr, attrs, &val, 1, 0);
3220 if (result) {
3221 *result = r;
3222 }
bellardaab33092005-10-30 20:48:42 +00003223 return val;
3224}
3225
Peter Maydell50013112015-04-26 16:49:24 +01003226uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
3227{
3228 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3229}
3230
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003231/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003232static inline uint32_t address_space_lduw_internal(AddressSpace *as,
3233 hwaddr addr,
3234 MemTxAttrs attrs,
3235 MemTxResult *result,
3236 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003237{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003238 uint8_t *ptr;
3239 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003240 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003241 hwaddr l = 2;
3242 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003243 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003244 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003245
Paolo Bonzini41063e12015-03-18 14:21:43 +01003246 rcu_read_lock();
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003247 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003248 false);
3249 if (l < 2 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003250 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003251
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003252 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003253 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003254#if defined(TARGET_WORDS_BIGENDIAN)
3255 if (endian == DEVICE_LITTLE_ENDIAN) {
3256 val = bswap16(val);
3257 }
3258#else
3259 if (endian == DEVICE_BIG_ENDIAN) {
3260 val = bswap16(val);
3261 }
3262#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003263 } else {
3264 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003265 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003266 switch (endian) {
3267 case DEVICE_LITTLE_ENDIAN:
3268 val = lduw_le_p(ptr);
3269 break;
3270 case DEVICE_BIG_ENDIAN:
3271 val = lduw_be_p(ptr);
3272 break;
3273 default:
3274 val = lduw_p(ptr);
3275 break;
3276 }
Peter Maydell50013112015-04-26 16:49:24 +01003277 r = MEMTX_OK;
3278 }
3279 if (result) {
3280 *result = r;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003281 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003282 if (release_lock) {
3283 qemu_mutex_unlock_iothread();
3284 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003285 rcu_read_unlock();
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003286 return val;
bellardaab33092005-10-30 20:48:42 +00003287}
3288
Peter Maydell50013112015-04-26 16:49:24 +01003289uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
3290 MemTxAttrs attrs, MemTxResult *result)
3291{
3292 return address_space_lduw_internal(as, addr, attrs, result,
3293 DEVICE_NATIVE_ENDIAN);
3294}
3295
3296uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
3297 MemTxAttrs attrs, MemTxResult *result)
3298{
3299 return address_space_lduw_internal(as, addr, attrs, result,
3300 DEVICE_LITTLE_ENDIAN);
3301}
3302
3303uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
3304 MemTxAttrs attrs, MemTxResult *result)
3305{
3306 return address_space_lduw_internal(as, addr, attrs, result,
3307 DEVICE_BIG_ENDIAN);
3308}
3309
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003310uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003311{
Peter Maydell50013112015-04-26 16:49:24 +01003312 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003313}
3314
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003315uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003316{
Peter Maydell50013112015-04-26 16:49:24 +01003317 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003318}
3319
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003320uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003321{
Peter Maydell50013112015-04-26 16:49:24 +01003322 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003323}
3324
bellard8df1cd02005-01-28 22:37:22 +00003325/* warning: addr must be aligned. The RAM page is not marked as code-dirty
3326 and the translated code inside it is not invalidated. This is useful if
3327 the dirty bits are used to track modified PTEs. */
Peter Maydell50013112015-04-26 16:49:24 +01003328void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3329 MemTxAttrs attrs, MemTxResult *result)
bellard8df1cd02005-01-28 22:37:22 +00003330{
bellard8df1cd02005-01-28 22:37:22 +00003331 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003332 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003333 hwaddr l = 4;
3334 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003335 MemTxResult r;
Paolo Bonzini845b6212015-03-23 11:45:53 +01003336 uint8_t dirty_log_mask;
Jan Kiszka4840f102015-06-18 18:47:22 +02003337 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003338
Paolo Bonzini41063e12015-03-18 14:21:43 +01003339 rcu_read_lock();
Edgar E. Iglesias2198a122013-11-28 10:13:41 +01003340 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003341 true);
3342 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003343 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003344
Peter Maydell50013112015-04-26 16:49:24 +01003345 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003346 } else {
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003347 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
bellard8df1cd02005-01-28 22:37:22 +00003348 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003349
Paolo Bonzini845b6212015-03-23 11:45:53 +01003350 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3351 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003352 cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
3353 4, dirty_log_mask);
Peter Maydell50013112015-04-26 16:49:24 +01003354 r = MEMTX_OK;
3355 }
3356 if (result) {
3357 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003358 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003359 if (release_lock) {
3360 qemu_mutex_unlock_iothread();
3361 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003362 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003363}
3364
Peter Maydell50013112015-04-26 16:49:24 +01003365void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3366{
3367 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3368}
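/*
 * Usage sketch (hypothetical): target MMU emulation setting an accessed
 * bit inside a guest page-table entry.  stl_phys_notdirty() skips the
 * DIRTY_MEMORY_CODE bookkeeping, so no translated code is invalidated
 * for the PTE write; this is what makes it suitable when the dirty bits
 * themselves are used to track modified PTEs.
 */
static void example_set_pte_accessed(AddressSpace *as, hwaddr pte_addr,
                                     uint32_t pte, uint32_t accessed_bit)
{
    stl_phys_notdirty(as, pte_addr, pte | accessed_bit);
}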
3369
bellard8df1cd02005-01-28 22:37:22 +00003370/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003371static inline void address_space_stl_internal(AddressSpace *as,
3372 hwaddr addr, uint32_t val,
3373 MemTxAttrs attrs,
3374 MemTxResult *result,
3375 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003376{
bellard8df1cd02005-01-28 22:37:22 +00003377 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003378 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003379 hwaddr l = 4;
3380 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003381 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003382 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003383
Paolo Bonzini41063e12015-03-18 14:21:43 +01003384 rcu_read_lock();
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003385 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003386 true);
3387 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003388 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003389
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003390#if defined(TARGET_WORDS_BIGENDIAN)
3391 if (endian == DEVICE_LITTLE_ENDIAN) {
3392 val = bswap32(val);
3393 }
3394#else
3395 if (endian == DEVICE_BIG_ENDIAN) {
3396 val = bswap32(val);
3397 }
3398#endif
Peter Maydell50013112015-04-26 16:49:24 +01003399 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003400 } else {
bellard8df1cd02005-01-28 22:37:22 +00003401 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003402 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003403 switch (endian) {
3404 case DEVICE_LITTLE_ENDIAN:
3405 stl_le_p(ptr, val);
3406 break;
3407 case DEVICE_BIG_ENDIAN:
3408 stl_be_p(ptr, val);
3409 break;
3410 default:
3411 stl_p(ptr, val);
3412 break;
3413 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003414 invalidate_and_set_dirty(mr, addr1, 4);
Peter Maydell50013112015-04-26 16:49:24 +01003415 r = MEMTX_OK;
bellard8df1cd02005-01-28 22:37:22 +00003416 }
Peter Maydell50013112015-04-26 16:49:24 +01003417 if (result) {
3418 *result = r;
3419 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003420 if (release_lock) {
3421 qemu_mutex_unlock_iothread();
3422 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003423 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003424}
3425
3426void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3427 MemTxAttrs attrs, MemTxResult *result)
3428{
3429 address_space_stl_internal(as, addr, val, attrs, result,
3430 DEVICE_NATIVE_ENDIAN);
3431}
3432
3433void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3434 MemTxAttrs attrs, MemTxResult *result)
3435{
3436 address_space_stl_internal(as, addr, val, attrs, result,
3437 DEVICE_LITTLE_ENDIAN);
3438}
3439
3440void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3441 MemTxAttrs attrs, MemTxResult *result)
3442{
3443 address_space_stl_internal(as, addr, val, attrs, result,
3444 DEVICE_BIG_ENDIAN);
bellard8df1cd02005-01-28 22:37:22 +00003445}
3446
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003447void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003448{
Peter Maydell50013112015-04-26 16:49:24 +01003449 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003450}
3451
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003452void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003453{
Peter Maydell50013112015-04-26 16:49:24 +01003454 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003455}
3456
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003457void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003458{
Peter Maydell50013112015-04-26 16:49:24 +01003459 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003460}
3461
bellardaab33092005-10-30 20:48:42 +00003462/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003463void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3464 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003465{
3466 uint8_t v = val;
Peter Maydell50013112015-04-26 16:49:24 +01003467 MemTxResult r;
3468
3469 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3470 if (result) {
3471 *result = r;
3472 }
3473}
3474
3475void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3476{
3477 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003478}

/* warning: addr must be aligned */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* XXX: optimize */
void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = tswap64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_le64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_be64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        int asidx;
        MemTxAttrs attrs;

        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
        asidx = cpu_asidx_from_attrs(cpu, attrs);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
                                          phys_addr, buf, l);
        } else {
            address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
                             MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
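
/* Hypothetical usage sketch (not part of exec.c): read a 32-bit word of
 * guest virtual memory from a debugger context, tolerating unmapped pages. */
static inline bool example_debug_read_u32(CPUState *cpu, target_ulong addr,
                                          uint32_t *value)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(cpu, addr, buf, sizeof(buf), 0) < 0) {
        return false; /* some page in the range is not mapped */
    }
    *value = ldl_p(buf);
    return true;
}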

/*
 * Allows code that needs to deal with migration bitmaps etc to still be built
 * target independent.
 */
size_t qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}
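
/* Hypothetical usage sketch (not part of exec.c): target-independent code,
 * such as migration bitmap handling, can recover the target page size
 * without referring to TARGET_PAGE_BITS directly. */
static inline size_t example_target_page_size(void)
{
    return (size_t)1 << qemu_target_page_bits();
}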

#endif

/*
 * A helper function for the _utterly broken_ virtio device model to find out
 * if it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
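
/* Hypothetical usage sketch (not part of exec.c): legacy device code could
 * use the helper to pick a default byte order that matches the target. */
static inline enum device_endian example_default_device_endian(void)
{
    return target_words_bigendian() ? DEVICE_BIG_ENDIAN : DEVICE_LITTLE_ENDIAN;
}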

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}
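
/* Hypothetical usage sketch (not part of exec.c): skip device-backed pages
 * when walking guest-physical memory, e.g. while producing a memory dump. */
static inline bool example_page_is_dumpable(hwaddr phys_addr)
{
    return !cpu_physical_memory_is_io(phys_addr);
}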

int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);
        if (ret) {
            break;
        }
    }
    rcu_read_unlock();
    return ret;
}
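
/* Hypothetical usage sketch (not part of exec.c): total up guest RAM by
 * walking every RAMBlock. The callback arguments mirror the call made in
 * qemu_ram_foreach_block() above; a non-zero return value stops the walk. */
static int example_sum_ram_cb(const char *block_name, void *host_addr,
                              ram_addr_t offset, ram_addr_t length,
                              void *opaque)
{
    uint64_t *total = opaque;

    *total += length;
    return 0;
}

static inline uint64_t example_total_ram_bytes(void)
{
    uint64_t total = 0;

    qemu_ram_foreach_block(example_sum_ram_cb, &total);
    return total;
}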
#endif