/*
 * Virtual page mapping
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#ifndef _WIN32
#endif

#include "qemu/cutils.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "hw/qdev-core.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#include "hw/xen/xen.h"
#endif
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "hw/hw.h"
#include "exec/memory.h"
#include "exec/ioport.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "exec/log.h"

#include "migration/vmstate.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif

#if !defined(CONFIG_USER_ONLY)

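/* Grow the radix tree node pool so that at least @nodes more nodes fit.
 * A static hint carries the last allocation size across dispatch rebuilds,
 * avoiding repeated small reallocations.
 */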
static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    static unsigned alloc_hint = 16;
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, alloc_hint);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
        alloc_hint = map->nodes_nb_alloc;
    }
}

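/* Allocate one tree node and initialize every entry to "unassigned" (for a
 * leaf) or "no child" (for an interior node).
 */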
static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

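/* Fill the tree for pages [*index, *index + *nb) with @leaf, descending one
 * level per recursive call and writing a whole entry whenever the remaining
 * range is aligned to and at least as large as this level's step.
 */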
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

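/* Point @nb pages starting at @index at the section numbered @leaf. */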
static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

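/* Compact the whole tree from the root down once the map has been built,
 * collapsing chains of single-child interior nodes so lookups can skip them.
 */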
static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return section->size.hi ||
           range_covers_byte(section->offset_within_address_space,
                             section->size.lo, addr);
}

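/* Walk the radix tree to find the section covering @addr; interior entries
 * record in 'skip' how many levels the lookup may jump at once.
 */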
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = atomic_read(&d->mru_section);
    subpage_t *subpage;
    bool update;

    if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
        section_covers_addr(section, addr)) {
        update = false;
    } else {
        section = phys_page_find(d->phys_map, addr, d->map.nodes,
                                 d->map.sections);
        update = true;
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    if (update) {
        atomic_set(&d->mru_section, section);
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif

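/* Return the CPU with the given index, or NULL if no such CPU exists. */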
CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
#endif

static bool cpu_index_auto_assigned;

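/* CPU indexes are auto-assigned in arrival order: the next free index is
 * simply the number of CPUs already on the list.
 */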
static int cpu_get_free_index(void)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    cpu_index_auto_assigned = true;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

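/* Undo cpu_exec_init(): unlink the CPU from the global list and unregister
 * its vmstate handlers.
 */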
void cpu_exec_exit(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cpu_list_lock();
    if (cpu->node.tqe_prev == NULL) {
        /* there is nothing to undo since cpu_exec_init() hasn't been called */
        cpu_list_unlock();
        return;
    }

    assert(!(cpu_index_auto_assigned && cpu != QTAILQ_LAST(&cpus, CPUTailQ)));

    QTAILQ_REMOVE(&cpus, cpu, node);
    cpu->node.tqe_prev = NULL;
    cpu->cpu_index = UNASSIGNED_CPU_INDEX;
    cpu_list_unlock();

    if (cc->vmsd != NULL) {
        vmstate_unregister(NULL, cc->vmsd, cpu);
    }
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
    }
}

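/* Link a new CPU into the global list, assign its index if the board did not
 * pick one, and (for softmmu builds) register its vmstate and memory link.
 */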
void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc ATTRIBUTE_UNUSED = CPU_GET_CLASS(cpu);
    Error *local_err ATTRIBUTE_UNUSED = NULL;

    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();

    /* This is a softmmu CPU object, so create a property for it
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
                             (Object **)&cpu->memory,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));
#endif

    cpu_list_lock();
    if (cpu->cpu_index == UNASSIGNED_CPU_INDEX) {
        cpu->cpu_index = cpu_get_free_index();
        assert(cpu->cpu_index != UNASSIGNED_CPU_INDEX);
    } else {
        assert(!cpu_index_auto_assigned);
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
    cpu_list_unlock();

#ifndef CONFIG_USER_ONLY
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
    }
#endif
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    MemTxAttrs attrs;
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
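/* Find the RAMBlock containing @addr, trying the most-recently-used block
 * first as a fast path.
 */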
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *                  rcu_read_lock()
     *                  read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *                  rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

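/* Reset the cached write-permission state for a guest RAM range in every
 * CPU's TLB so that the next write to those pages is trapped and marks
 * them dirty again.
 */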
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    bool dirty = false;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
                                              offset, num);
        page += num;
    }

    rcu_read_unlock();

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

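/* Release the reference a section holds on its MemoryRegion; if the region
 * was a subpage container, free it as well.
 */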
static void phys_section_destroy(MemoryRegion *mr)
{
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

Avi Kivityac1970f2012-10-03 16:22:53 +02001134static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001135{
1136 subpage_t *subpage;
Avi Kivitya8170e52012-10-23 12:30:10 +02001137 hwaddr base = section->offset_within_address_space
Avi Kivity0f0cb162012-02-13 17:14:32 +02001138 & TARGET_PAGE_MASK;
Michael S. Tsirkin97115a82013-11-13 20:08:19 +02001139 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001140 d->map.nodes, d->map.sections);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001141 MemoryRegionSection subsection = {
1142 .offset_within_address_space = base,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001143 .size = int128_make64(TARGET_PAGE_SIZE),
Avi Kivity0f0cb162012-02-13 17:14:32 +02001144 };
Avi Kivitya8170e52012-10-23 12:30:10 +02001145 hwaddr start, end;
Avi Kivity0f0cb162012-02-13 17:14:32 +02001146
Avi Kivityf3705d52012-03-08 16:16:34 +02001147 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001148
Avi Kivityf3705d52012-03-08 16:16:34 +02001149 if (!(existing->mr->subpage)) {
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001150 subpage = subpage_init(d->as, base);
Edgar E. Iglesias3be91e82013-11-07 18:42:51 +01001151 subsection.address_space = d->as;
Avi Kivity0f0cb162012-02-13 17:14:32 +02001152 subsection.mr = &subpage->iomem;
Avi Kivityac1970f2012-10-03 16:22:53 +02001153 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001154 phys_section_add(&d->map, &subsection));
Avi Kivity0f0cb162012-02-13 17:14:32 +02001155 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02001156 subpage = container_of(existing->mr, subpage_t, iomem);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001157 }
1158 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001159 end = start + int128_get64(section->size) - 1;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001160 subpage_register(subpage, start, end,
1161 phys_section_add(&d->map, section));
Avi Kivity0f0cb162012-02-13 17:14:32 +02001162}
1163
1164
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001165static void register_multipage(AddressSpaceDispatch *d,
1166 MemoryRegionSection *section)
bellard33417e72003-08-10 21:47:01 +00001167{
Avi Kivitya8170e52012-10-23 12:30:10 +02001168 hwaddr start_addr = section->offset_within_address_space;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001169 uint16_t section_index = phys_section_add(&d->map, section);
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001170 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1171 TARGET_PAGE_BITS));
Avi Kivitydd811242012-01-02 12:17:03 +02001172
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001173 assert(num_pages);
1174 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
bellard33417e72003-08-10 21:47:01 +00001175}
1176
Avi Kivityac1970f2012-10-03 16:22:53 +02001177static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001178{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001179 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini00752702013-05-29 12:13:54 +02001180 AddressSpaceDispatch *d = as->next_dispatch;
Paolo Bonzini99b9cc02013-05-27 13:18:01 +02001181 MemoryRegionSection now = *section, remain = *section;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001182 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001183
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001184 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1185 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1186 - now.offset_within_address_space;
1187
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001188 now.size = int128_min(int128_make64(left), now.size);
Avi Kivityac1970f2012-10-03 16:22:53 +02001189 register_subpage(d, &now);
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001190 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001191 now.size = int128_zero();
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001192 }
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001193 while (int128_ne(remain.size, now.size)) {
1194 remain.size = int128_sub(remain.size, now.size);
1195 remain.offset_within_address_space += int128_get64(now.size);
1196 remain.offset_within_region += int128_get64(now.size);
Tyler Hall69b67642012-07-25 18:45:04 -04001197 now = remain;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001198 if (int128_lt(remain.size, page_size)) {
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001199 register_subpage(d, &now);
Hu Tao88266242013-08-29 18:21:16 +08001200 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001201 now.size = page_size;
Avi Kivityac1970f2012-10-03 16:22:53 +02001202 register_subpage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001203 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001204 now.size = int128_and(now.size, int128_neg(page_size));
Avi Kivityac1970f2012-10-03 16:22:53 +02001205 register_multipage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001206 }
Avi Kivity0f0cb162012-02-13 17:14:32 +02001207 }
1208}
1209
Sheng Yang62a27442010-01-26 19:21:16 +08001210void qemu_flush_coalesced_mmio_buffer(void)
1211{
1212 if (kvm_enabled())
1213 kvm_flush_coalesced_mmio_buffer();
1214}
1215
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001216void qemu_mutex_lock_ramlist(void)
1217{
1218 qemu_mutex_lock(&ram_list.mutex);
1219}
1220
1221void qemu_mutex_unlock_ramlist(void)
1222{
1223 qemu_mutex_unlock(&ram_list.mutex);
1224}
1225
Markus Armbrustere1e84ba2013-07-31 15:11:10 +02001226#ifdef __linux__
Alex Williamson04b16652010-07-02 11:13:17 -06001227static void *file_ram_alloc(RAMBlock *block,
1228 ram_addr_t memory,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001229 const char *path,
1230 Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001231{
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001232 bool unlink_on_error = false;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001233 char *filename;
Peter Feiner8ca761f2013-03-04 13:54:25 -05001234 char *sanitized_name;
1235 char *c;
Igor Mammedov056b68a2016-07-20 11:54:03 +02001236 void *area = MAP_FAILED;
Paolo Bonzini5c3ece72016-03-17 15:53:13 +01001237 int fd = -1;
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001238 int64_t page_size;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001239
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001240 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1241 error_setg(errp,
1242 "host lacks kvm mmu notifiers, -mem-path unsupported");
1243 return NULL;
1244 }
1245
1246 for (;;) {
1247 fd = open(path, O_RDWR);
1248 if (fd >= 0) {
1249 /* @path names an existing file, use it */
1250 break;
1251 }
1252 if (errno == ENOENT) {
1253 /* @path names a file that doesn't exist, create it */
1254 fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
1255 if (fd >= 0) {
1256 unlink_on_error = true;
1257 break;
1258 }
1259 } else if (errno == EISDIR) {
1260 /* @path names a directory, create a file there */
1261 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1262 sanitized_name = g_strdup(memory_region_name(block->mr));
1263 for (c = sanitized_name; *c != '\0'; c++) {
1264 if (*c == '/') {
1265 *c = '_';
1266 }
1267 }
1268
1269 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1270 sanitized_name);
1271 g_free(sanitized_name);
1272
1273 fd = mkstemp(filename);
1274 if (fd >= 0) {
1275 unlink(filename);
1276 g_free(filename);
1277 break;
1278 }
1279 g_free(filename);
1280 }
1281 if (errno != EEXIST && errno != EINTR) {
1282 error_setg_errno(errp, errno,
1283 "can't open backing store %s for guest RAM",
1284 path);
1285 goto error;
1286 }
1287 /*
1288 * Try again on EINTR and EEXIST. The latter happens when
1289 * something else creates the file between our two open() calls.
1290 */
1291 }
1292
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001293 page_size = qemu_fd_getpagesize(fd);
Dominik Dingeld2f39ad2016-04-25 13:55:38 +02001294 block->mr->align = MAX(page_size, QEMU_VMALLOC_ALIGN);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001295
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001296 if (memory < page_size) {
Hu Tao557529d2014-09-09 13:28:00 +08001297 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001298 "or larger than page size 0x%" PRIx64,
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001299 memory, page_size);
Hu Tao557529d2014-09-09 13:28:00 +08001300 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001301 }
1302
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001303 memory = ROUND_UP(memory, page_size);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001304
1305 /*
1306 * ftruncate is not supported by hugetlbfs in older
1307 * hosts, so don't bother bailing out on errors.
1308 * If anything goes wrong with it under other filesystems,
1309 * mmap will fail.
1310 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001311 if (ftruncate(fd, memory)) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001312 perror("ftruncate");
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001313 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001314
Dominik Dingeld2f39ad2016-04-25 13:55:38 +02001315 area = qemu_ram_mmap(fd, memory, block->mr->align,
1316 block->flags & RAM_SHARED);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001317 if (area == MAP_FAILED) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001318 error_setg_errno(errp, errno,
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001319 "unable to map backing store for guest RAM");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001320 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001321 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001322
1323 if (mem_prealloc) {
Igor Mammedov056b68a2016-07-20 11:54:03 +02001324 os_mem_prealloc(fd, area, memory, errp);
1325 if (errp && *errp) {
1326 goto error;
1327 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001328 }
1329
Alex Williamson04b16652010-07-02 11:13:17 -06001330 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001331 return area;
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001332
1333error:
Igor Mammedov056b68a2016-07-20 11:54:03 +02001334 if (area != MAP_FAILED) {
1335 qemu_ram_munmap(area, memory);
1336 }
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001337 if (unlink_on_error) {
1338 unlink(path);
1339 }
Paolo Bonzini5c3ece72016-03-17 15:53:13 +01001340 if (fd != -1) {
1341 close(fd);
1342 }
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001343 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001344}
1345#endif
1346
Mike Day0dc3f442013-09-05 14:41:35 -04001347/* Called with the ramlist lock held. */
Alex Williamsond17b5282010-06-25 11:08:38 -06001348static ram_addr_t find_ram_offset(ram_addr_t size)
1349{
Alex Williamson04b16652010-07-02 11:13:17 -06001350 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001351 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001352
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001353 assert(size != 0); /* it would hand out same offset multiple times */
1354
Mike Day0dc3f442013-09-05 14:41:35 -04001355 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
Alex Williamson04b16652010-07-02 11:13:17 -06001356 return 0;
Mike Day0d53d9f2015-01-21 13:45:24 +01001357 }
Alex Williamson04b16652010-07-02 11:13:17 -06001358
Mike Day0dc3f442013-09-05 14:41:35 -04001359 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001360 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001361
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001362 end = block->offset + block->max_length;
Alex Williamson04b16652010-07-02 11:13:17 -06001363
Mike Day0dc3f442013-09-05 14:41:35 -04001364 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001365 if (next_block->offset >= end) {
1366 next = MIN(next, next_block->offset);
1367 }
1368 }
1369 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001370 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001371 mingap = next - end;
1372 }
1373 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001374
1375 if (offset == RAM_ADDR_MAX) {
1376 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1377 (uint64_t)size);
1378 abort();
1379 }
1380
Alex Williamson04b16652010-07-02 11:13:17 -06001381 return offset;
1382}
1383
Juan Quintela652d7ec2012-07-20 10:37:54 +02001384ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001385{
Alex Williamsond17b5282010-06-25 11:08:38 -06001386 RAMBlock *block;
1387 ram_addr_t last = 0;
1388
Mike Day0dc3f442013-09-05 14:41:35 -04001389 rcu_read_lock();
1390 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001391 last = MAX(last, block->offset + block->max_length);
Mike Day0d53d9f2015-01-21 13:45:24 +01001392 }
Mike Day0dc3f442013-09-05 14:41:35 -04001393 rcu_read_unlock();
Alex Williamsond17b5282010-06-25 11:08:38 -06001394 return last;
1395}
1396
Jason Baronddb97f12012-08-02 15:44:16 -04001397static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1398{
1399 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001400
1401 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
Marcel Apfelbaum47c8ca52015-02-04 17:43:54 +02001402 if (!machine_dump_guest_core(current_machine)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001403 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1404 if (ret) {
1405 perror("qemu_madvise");
1406 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1407 "but dump_guest_core=off specified\n");
1408 }
1409 }
1410}
1411
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001412const char *qemu_ram_get_idstr(RAMBlock *rb)
1413{
1414 return rb->idstr;
1415}
1416
Mike Dayae3a7042013-09-05 14:41:35 -04001417/* Called with iothread lock held. */
Gongleifa53a0e2016-05-10 10:04:59 +08001418void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
Hu Tao20cfe882014-04-02 15:13:26 +08001419{
Gongleifa53a0e2016-05-10 10:04:59 +08001420 RAMBlock *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001421
Avi Kivityc5705a72011-12-20 15:59:12 +02001422 assert(new_block);
1423 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001424
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001425 if (dev) {
1426 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001427 if (id) {
1428 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001429 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001430 }
1431 }
1432 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1433
Gongleiab0a9952016-05-10 10:05:00 +08001434 rcu_read_lock();
Mike Day0dc3f442013-09-05 14:41:35 -04001435 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Gongleifa53a0e2016-05-10 10:04:59 +08001436 if (block != new_block &&
1437 !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001438 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1439 new_block->idstr);
1440 abort();
1441 }
1442 }
Mike Day0dc3f442013-09-05 14:41:35 -04001443 rcu_read_unlock();
Avi Kivityc5705a72011-12-20 15:59:12 +02001444}
1445
Mike Dayae3a7042013-09-05 14:41:35 -04001446/* Called with iothread lock held. */
Gongleifa53a0e2016-05-10 10:04:59 +08001447void qemu_ram_unset_idstr(RAMBlock *block)
Hu Tao20cfe882014-04-02 15:13:26 +08001448{
Mike Dayae3a7042013-09-05 14:41:35 -04001449 /* FIXME: arch_init.c assumes that this is not called throughout
1450 * migration. Ignore the problem since hot-unplug during migration
1451 * does not work anyway.
1452 */
Hu Tao20cfe882014-04-02 15:13:26 +08001453 if (block) {
1454 memset(block->idstr, 0, sizeof(block->idstr));
1455 }
1456}
1457
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001458static int memory_try_enable_merging(void *addr, size_t len)
1459{
Marcel Apfelbaum75cc7f02015-02-04 17:43:55 +02001460 if (!machine_mem_merge(current_machine)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001461 /* disabled by the user */
1462 return 0;
1463 }
1464
1465 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1466}
1467
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001468/* Only legal before guest might have detected the memory size: e.g. on
1469 * incoming migration, or right after reset.
1470 *
1471 * As memory core doesn't know how is memory accessed, it is up to
1472 * resize callback to update device state and/or add assertions to detect
1473 * misuse, if necessary.
1474 */
Gongleifa53a0e2016-05-10 10:04:59 +08001475int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001476{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001477 assert(block);
1478
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001479 newsize = HOST_PAGE_ALIGN(newsize);
Michael S. Tsirkin129ddaf2015-02-17 10:15:30 +01001480
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001481 if (block->used_length == newsize) {
1482 return 0;
1483 }
1484
1485 if (!(block->flags & RAM_RESIZEABLE)) {
1486 error_setg_errno(errp, EINVAL,
1487 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1488 " in != 0x" RAM_ADDR_FMT, block->idstr,
1489 newsize, block->used_length);
1490 return -EINVAL;
1491 }
1492
1493 if (block->max_length < newsize) {
1494 error_setg_errno(errp, EINVAL,
1495 "Length too large: %s: 0x" RAM_ADDR_FMT
1496 " > 0x" RAM_ADDR_FMT, block->idstr,
1497 newsize, block->max_length);
1498 return -EINVAL;
1499 }
1500
1501 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1502 block->used_length = newsize;
Paolo Bonzini58d27072015-03-23 11:56:01 +01001503 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1504 DIRTY_CLIENTS_ALL);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001505 memory_region_set_size(block->mr, newsize);
1506 if (block->resized) {
1507 block->resized(block->idstr, newsize, block->host);
1508 }
1509 return 0;
1510}
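/* Usage sketch (hypothetical caller, not part of this file): resizing a
 * RAM_RESIZEABLE block during incoming migration, before the guest can
 * have observed the old size.
 */
static void example_resize_incoming_block(RAMBlock *block, ram_addr_t newsize)
{
    Error *err = NULL;

    if (qemu_ram_resize(block, newsize, &err) < 0) {
        error_report_err(err);
    }
}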
1511
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001512/* Called with ram_list.mutex held */
1513static void dirty_memory_extend(ram_addr_t old_ram_size,
1514 ram_addr_t new_ram_size)
1515{
1516 ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
1517 DIRTY_MEMORY_BLOCK_SIZE);
1518 ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
1519 DIRTY_MEMORY_BLOCK_SIZE);
1520 int i;
1521
1522 /* Only need to extend if block count increased */
1523 if (new_num_blocks <= old_num_blocks) {
1524 return;
1525 }
1526
1527 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1528 DirtyMemoryBlocks *old_blocks;
1529 DirtyMemoryBlocks *new_blocks;
1530 int j;
1531
1532 old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
1533 new_blocks = g_malloc(sizeof(*new_blocks) +
1534 sizeof(new_blocks->blocks[0]) * new_num_blocks);
1535
1536 if (old_num_blocks) {
1537 memcpy(new_blocks->blocks, old_blocks->blocks,
1538 old_num_blocks * sizeof(old_blocks->blocks[0]));
1539 }
1540
1541 for (j = old_num_blocks; j < new_num_blocks; j++) {
1542 new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
1543 }
1544
1545 atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
1546
1547 if (old_blocks) {
1548 g_free_rcu(old_blocks, rcu);
1549 }
1550 }
1551}
1552
Fam Zheng528f46a2016-03-01 14:18:18 +08001553static void ram_block_add(RAMBlock *new_block, Error **errp)
Avi Kivityc5705a72011-12-20 15:59:12 +02001554{
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001555 RAMBlock *block;
Mike Day0d53d9f2015-01-21 13:45:24 +01001556 RAMBlock *last_block = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001557 ram_addr_t old_ram_size, new_ram_size;
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001558 Error *err = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001559
1560 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001561
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001562 qemu_mutex_lock_ramlist();
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001563 new_block->offset = find_ram_offset(new_block->max_length);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001564
1565 if (!new_block->host) {
1566 if (xen_enabled()) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001567 xen_ram_alloc(new_block->offset, new_block->max_length,
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001568 new_block->mr, &err);
1569 if (err) {
1570 error_propagate(errp, err);
1571 qemu_mutex_unlock_ramlist();
Paolo Bonzini39c350e2016-03-09 18:14:01 +01001572 return;
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001573 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001574 } else {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001575 new_block->host = phys_mem_alloc(new_block->max_length,
Igor Mammedova2b257d2014-10-31 16:38:37 +00001576 &new_block->mr->align);
Markus Armbruster39228252013-07-31 15:11:11 +02001577 if (!new_block->host) {
Hu Taoef701d72014-09-09 13:27:54 +08001578 error_setg_errno(errp, errno,
1579 "cannot set up guest memory '%s'",
1580 memory_region_name(new_block->mr));
1581 qemu_mutex_unlock_ramlist();
Paolo Bonzini39c350e2016-03-09 18:14:01 +01001582 return;
Markus Armbruster39228252013-07-31 15:11:11 +02001583 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001584 memory_try_enable_merging(new_block->host, new_block->max_length);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001585 }
1586 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001587
Li Zhijiandd631692015-07-02 20:18:06 +08001588 new_ram_size = MAX(old_ram_size,
1589 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1590 if (new_ram_size > old_ram_size) {
1591 migration_bitmap_extend(old_ram_size, new_ram_size);
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001592 dirty_memory_extend(old_ram_size, new_ram_size);
Li Zhijiandd631692015-07-02 20:18:06 +08001593 }
Mike Day0d53d9f2015-01-21 13:45:24 +01001594 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1595 * QLIST (which has an RCU-friendly variant) does not have insertion at
1596 * tail, so save the last element in last_block.
1597 */
Mike Day0dc3f442013-09-05 14:41:35 -04001598 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Mike Day0d53d9f2015-01-21 13:45:24 +01001599 last_block = block;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001600 if (block->max_length < new_block->max_length) {
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001601 break;
1602 }
1603 }
1604 if (block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001605 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001606 } else if (last_block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001607 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001608 } else { /* list is empty */
Mike Day0dc3f442013-09-05 14:41:35 -04001609 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001610 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001611 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001612
Mike Day0dc3f442013-09-05 14:41:35 -04001613 /* Write list before version */
1614 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001615 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001616 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001617
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001618 cpu_physical_memory_set_dirty_range(new_block->offset,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001619 new_block->used_length,
1620 DIRTY_CLIENTS_ALL);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001621
Paolo Bonzinia904c912015-01-21 16:18:35 +01001622 if (new_block->host) {
1623 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1624 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1625 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1626 if (kvm_enabled()) {
1627 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1628 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001629 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001630}
1631
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001632#ifdef __linux__
Fam Zheng528f46a2016-03-01 14:18:18 +08001633RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
1634 bool share, const char *mem_path,
1635 Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001636{
1637 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001638 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001639
1640 if (xen_enabled()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001641 error_setg(errp, "-mem-path not supported with Xen");
Fam Zheng528f46a2016-03-01 14:18:18 +08001642 return NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001643 }
1644
1645 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1646 /*
1647 * file_ram_alloc() needs to allocate just like
1648 * phys_mem_alloc, but we haven't bothered to provide
1649 * a hook there.
1650 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001651 error_setg(errp,
1652 "-mem-path not supported with this accelerator");
Fam Zheng528f46a2016-03-01 14:18:18 +08001653 return NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001654 }
1655
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001656 size = HOST_PAGE_ALIGN(size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001657 new_block = g_malloc0(sizeof(*new_block));
1658 new_block->mr = mr;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001659 new_block->used_length = size;
1660 new_block->max_length = size;
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001661 new_block->flags = share ? RAM_SHARED : 0;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001662 new_block->host = file_ram_alloc(new_block, size,
1663 mem_path, errp);
1664 if (!new_block->host) {
1665 g_free(new_block);
Fam Zheng528f46a2016-03-01 14:18:18 +08001666 return NULL;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001667 }
1668
Fam Zheng528f46a2016-03-01 14:18:18 +08001669 ram_block_add(new_block, &local_err);
Hu Taoef701d72014-09-09 13:27:54 +08001670 if (local_err) {
1671 g_free(new_block);
1672 error_propagate(errp, local_err);
Fam Zheng528f46a2016-03-01 14:18:18 +08001673 return NULL;
Hu Taoef701d72014-09-09 13:27:54 +08001674 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001675 return new_block;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001676}
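/* Usage sketch (hypothetical, Linux-only like the function itself): backing
 * a MemoryRegion's RAM with a hugetlbfs file, roughly what the
 * memory-backend-file object does.  The path and share flag are examples.
 */
static RAMBlock *example_alloc_file_backed_ram(MemoryRegion *mr,
                                               ram_addr_t size, Error **errp)
{
    return qemu_ram_alloc_from_file(size, mr, true /* share */,
                                    "/dev/hugepages/guest-ram", errp);
}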
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001677#endif
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001678
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001679static
Fam Zheng528f46a2016-03-01 14:18:18 +08001680RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1681 void (*resized)(const char*,
1682 uint64_t length,
1683 void *host),
1684 void *host, bool resizeable,
1685 MemoryRegion *mr, Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001686{
1687 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001688 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001689
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001690 size = HOST_PAGE_ALIGN(size);
1691 max_size = HOST_PAGE_ALIGN(max_size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001692 new_block = g_malloc0(sizeof(*new_block));
1693 new_block->mr = mr;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001694 new_block->resized = resized;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001695 new_block->used_length = size;
1696 new_block->max_length = max_size;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001697 assert(max_size >= size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001698 new_block->fd = -1;
1699 new_block->host = host;
1700 if (host) {
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001701 new_block->flags |= RAM_PREALLOC;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001702 }
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001703 if (resizeable) {
1704 new_block->flags |= RAM_RESIZEABLE;
1705 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001706 ram_block_add(new_block, &local_err);
Hu Taoef701d72014-09-09 13:27:54 +08001707 if (local_err) {
1708 g_free(new_block);
1709 error_propagate(errp, local_err);
Fam Zheng528f46a2016-03-01 14:18:18 +08001710 return NULL;
Hu Taoef701d72014-09-09 13:27:54 +08001711 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001712 return new_block;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001713}
1714
Fam Zheng528f46a2016-03-01 14:18:18 +08001715RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001716 MemoryRegion *mr, Error **errp)
1717{
1718 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1719}
1720
Fam Zheng528f46a2016-03-01 14:18:18 +08001721RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
pbrook94a6b542009-04-11 17:15:54 +00001722{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001723 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1724}
1725
Fam Zheng528f46a2016-03-01 14:18:18 +08001726RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001727 void (*resized)(const char*,
1728 uint64_t length,
1729 void *host),
1730 MemoryRegion *mr, Error **errp)
1731{
1732 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
pbrook94a6b542009-04-11 17:15:54 +00001733}
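/* Sketch of the contract behind qemu_ram_alloc_resizeable() (hypothetical
 * caller): the block is created at "size" but may later grow up to "maxsz"
 * via qemu_ram_resize(); the resized callback fires after used_length has
 * been updated and receives the block's idstr, new length and host pointer.
 */
static void example_ram_resized(const char *idstr, uint64_t new_len, void *host)
{
    /* e.g. refresh a device's cached view of its backing storage size */
}

static RAMBlock *example_alloc_growable_ram(MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_resizeable(64 * 1024, 16 * 1024 * 1024,
                                     example_ram_resized, mr, errp);
}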
bellarde9a1ab12007-02-08 23:08:38 +00001734
Paolo Bonzini43771532013-09-09 17:58:40 +02001735static void reclaim_ramblock(RAMBlock *block)
1736{
1737 if (block->flags & RAM_PREALLOC) {
1738 ;
1739 } else if (xen_enabled()) {
1740 xen_invalidate_map_cache_entry(block->host);
1741#ifndef _WIN32
1742 } else if (block->fd >= 0) {
Eduardo Habkost2f3a2bb2015-11-06 20:11:21 -02001743 qemu_ram_munmap(block->host, block->max_length);
Paolo Bonzini43771532013-09-09 17:58:40 +02001744 close(block->fd);
1745#endif
1746 } else {
1747 qemu_anon_ram_free(block->host, block->max_length);
1748 }
1749 g_free(block);
1750}
1751
Fam Zhengf1060c52016-03-01 14:18:22 +08001752void qemu_ram_free(RAMBlock *block)
bellarde9a1ab12007-02-08 23:08:38 +00001753{
Marc-André Lureau85bc2a12016-03-29 13:20:51 +02001754 if (!block) {
1755 return;
1756 }
1757
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001758 qemu_mutex_lock_ramlist();
Fam Zhengf1060c52016-03-01 14:18:22 +08001759 QLIST_REMOVE_RCU(block, next);
1760 ram_list.mru_block = NULL;
1761 /* Write list before version */
1762 smp_wmb();
1763 ram_list.version++;
1764 call_rcu(block, reclaim_ramblock, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001765 qemu_mutex_unlock_ramlist();
bellarde9a1ab12007-02-08 23:08:38 +00001766}
1767
Huang Yingcd19cfa2011-03-02 08:56:19 +01001768#ifndef _WIN32
1769void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1770{
1771 RAMBlock *block;
1772 ram_addr_t offset;
1773 int flags;
1774 void *area, *vaddr;
1775
Mike Day0dc3f442013-09-05 14:41:35 -04001776 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001777 offset = addr - block->offset;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001778 if (offset < block->max_length) {
Michael S. Tsirkin1240be22014-11-12 11:44:41 +02001779 vaddr = ramblock_ptr(block, offset);
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001780 if (block->flags & RAM_PREALLOC) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001781 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001782 } else if (xen_enabled()) {
1783 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001784 } else {
1785 flags = MAP_FIXED;
Markus Armbruster3435f392013-07-31 15:11:07 +02001786 if (block->fd >= 0) {
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001787 flags |= (block->flags & RAM_SHARED ?
1788 MAP_SHARED : MAP_PRIVATE);
Markus Armbruster3435f392013-07-31 15:11:07 +02001789 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1790 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001791 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001792 /*
1793 * Remap needs to match alloc. Accelerators that
1794 * set phys_mem_alloc never remap. If they did,
1795 * we'd need a remap hook here.
1796 */
1797 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1798
Huang Yingcd19cfa2011-03-02 08:56:19 +01001799 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1800 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1801 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001802 }
1803 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001804 fprintf(stderr, "Could not remap addr: "
1805 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001806 length, addr);
1807 exit(1);
1808 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001809 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001810 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001811 }
Huang Yingcd19cfa2011-03-02 08:56:19 +01001812 }
1813 }
1814}
1815#endif /* !_WIN32 */
1816
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001817/* Return a host pointer to ram allocated with qemu_ram_alloc.
Mike Dayae3a7042013-09-05 14:41:35 -04001818 * This should not be used for general purpose DMA. Use address_space_map
1819 * or address_space_rw instead. For local memory (e.g. video ram) that the
1820 * device owns, use memory_region_get_ram_ptr.
Mike Day0dc3f442013-09-05 14:41:35 -04001821 *
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001822 * Called within RCU critical section.
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001823 */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001824void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001825{
Gonglei3655cb92016-02-20 10:35:20 +08001826 RAMBlock *block = ram_block;
1827
1828 if (block == NULL) {
1829 block = qemu_get_ram_block(addr);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001830 addr -= block->offset;
Gonglei3655cb92016-02-20 10:35:20 +08001831 }
Mike Dayae3a7042013-09-05 14:41:35 -04001832
1833 if (xen_enabled() && block->host == NULL) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001834 /* We need to check if the requested address is in the RAM
1835 * because we don't want to map the entire memory in QEMU.
1836 * In that case just map until the end of the page.
1837 */
1838 if (block->offset == 0) {
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001839 return xen_map_cache(addr, 0, 0);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001840 }
Mike Dayae3a7042013-09-05 14:41:35 -04001841
1842 block->host = xen_map_cache(block->offset, block->max_length, 1);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001843 }
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001844 return ramblock_ptr(block, addr);
pbrookdc828ca2009-04-09 22:21:07 +00001845}
1846
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001847/* Return a host pointer to guest's ram. Similar to qemu_map_ram_ptr
Mike Dayae3a7042013-09-05 14:41:35 -04001848 * but takes a size argument.
Mike Day0dc3f442013-09-05 14:41:35 -04001849 *
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001850 * Called within RCU critical section.
Mike Dayae3a7042013-09-05 14:41:35 -04001851 */
Gonglei3655cb92016-02-20 10:35:20 +08001852static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
1853 hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001854{
Gonglei3655cb92016-02-20 10:35:20 +08001855 RAMBlock *block = ram_block;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001856 if (*size == 0) {
1857 return NULL;
1858 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001859
Gonglei3655cb92016-02-20 10:35:20 +08001860 if (block == NULL) {
1861 block = qemu_get_ram_block(addr);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001862 addr -= block->offset;
Gonglei3655cb92016-02-20 10:35:20 +08001863 }
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001864 *size = MIN(*size, block->max_length - addr);
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001865
1866 if (xen_enabled() && block->host == NULL) {
1867 /* We need to check if the requested address is in the RAM
1868 * because we don't want to map the entire memory in QEMU.
1869 * In that case just map the requested area.
1870 */
1871 if (block->offset == 0) {
1872 return xen_map_cache(addr, *size, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001873 }
1874
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001875 block->host = xen_map_cache(block->offset, block->max_length, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001876 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001877
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001878 return ramblock_ptr(block, addr);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001879}
1880
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001881/*
1882 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1883 * in that RAMBlock.
1884 *
1885 * ptr: Host pointer to look up
1886 * round_offset: If true round the result offset down to a page boundary
1888 * *offset: set to result offset within the RAMBlock
1889 *
1890 * Returns: RAMBlock (or NULL if not found)
Mike Dayae3a7042013-09-05 14:41:35 -04001891 *
1892 * By the time this function returns, the returned pointer is not protected
1893 * by RCU anymore. If the caller is not within an RCU critical section and
1894 * does not hold the iothread lock, it must have other means of protecting the
1895 * pointer, such as a reference to the region that includes the incoming
1896 * ram_addr_t.
1897 */
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001898RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001899 ram_addr_t *offset)
pbrook5579c7f2009-04-11 14:47:08 +00001900{
pbrook94a6b542009-04-11 17:15:54 +00001901 RAMBlock *block;
1902 uint8_t *host = ptr;
1903
Jan Kiszka868bb332011-06-21 22:59:09 +02001904 if (xen_enabled()) {
Paolo Bonzinif615f392016-05-26 10:07:50 +02001905 ram_addr_t ram_addr;
Mike Day0dc3f442013-09-05 14:41:35 -04001906 rcu_read_lock();
Paolo Bonzinif615f392016-05-26 10:07:50 +02001907 ram_addr = xen_ram_addr_from_mapcache(ptr);
1908 block = qemu_get_ram_block(ram_addr);
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001909 if (block) {
Anthony PERARDd6b6aec2016-06-09 16:56:17 +01001910 *offset = ram_addr - block->offset;
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001911 }
Mike Day0dc3f442013-09-05 14:41:35 -04001912 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001913 return block;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001914 }
1915
Mike Day0dc3f442013-09-05 14:41:35 -04001916 rcu_read_lock();
1917 block = atomic_rcu_read(&ram_list.mru_block);
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001918 if (block && block->host && host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001919 goto found;
1920 }
1921
Mike Day0dc3f442013-09-05 14:41:35 -04001922 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001923 /* This case happens when the block is not mapped. */
1924 if (block->host == NULL) {
1925 continue;
1926 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001927 if (host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001928 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001929 }
pbrook94a6b542009-04-11 17:15:54 +00001930 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001931
Mike Day0dc3f442013-09-05 14:41:35 -04001932 rcu_read_unlock();
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001933 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001934
1935found:
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001936 *offset = (host - block->host);
1937 if (round_offset) {
1938 *offset &= TARGET_PAGE_MASK;
1939 }
Mike Day0dc3f442013-09-05 14:41:35 -04001940 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001941 return block;
1942}
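/* Usage sketch (hypothetical helper): mapping a host pointer back to its
 * RAMBlock and page-aligned offset, e.g. for postcopy-style bookkeeping.
 * The caller is responsible for keeping the block alive as described above.
 */
static RAMBlock *example_block_for_host_ptr(void *host, ram_addr_t *offset)
{
    return qemu_ram_block_from_host(host, true, offset);
}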
1943
Dr. David Alan Gilberte3dd7492015-11-05 18:10:33 +00001944/*
1945 * Finds the named RAMBlock
1946 *
1947 * name: The name of RAMBlock to find
1948 *
1949 * Returns: RAMBlock (or NULL if not found)
1950 */
1951RAMBlock *qemu_ram_block_by_name(const char *name)
1952{
1953 RAMBlock *block;
1954
1955 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1956 if (!strcmp(name, block->idstr)) {
1957 return block;
1958 }
1959 }
1960
1961 return NULL;
1962}
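/* Usage sketch (hypothetical): checking that a block named by the migration
 * stream exists on the destination.  Must run inside an RCU critical
 * section, since the lookup walks ram_list.blocks with QLIST_FOREACH_RCU.
 */
static bool example_ram_block_exists(const char *name)
{
    return qemu_ram_block_by_name(name) != NULL;
}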
1963
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001964/* Some of the softmmu routines need to translate from a host pointer
1965 (typically a TLB entry) back to a ram offset. */
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01001966ram_addr_t qemu_ram_addr_from_host(void *ptr)
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001967{
1968 RAMBlock *block;
Paolo Bonzinif615f392016-05-26 10:07:50 +02001969 ram_addr_t offset;
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001970
Paolo Bonzinif615f392016-05-26 10:07:50 +02001971 block = qemu_ram_block_from_host(ptr, false, &offset);
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001972 if (!block) {
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01001973 return RAM_ADDR_INVALID;
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001974 }
1975
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01001976 return block->offset + offset;
Marcelo Tosattie8902612010-10-11 15:31:19 -03001977}
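/* Usage sketch (hypothetical): qemu_ram_addr_from_host() reports failure
 * with RAM_ADDR_INVALID rather than aborting, so callers can test whether
 * an arbitrary host pointer points into guest RAM at all.
 */
static bool example_ptr_is_guest_ram(void *ptr)
{
    return qemu_ram_addr_from_host(ptr) != RAM_ADDR_INVALID;
}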
Alex Williamsonf471a172010-06-11 11:11:42 -06001978
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001979/* Called within RCU critical section. */
Avi Kivitya8170e52012-10-23 12:30:10 +02001980static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001981 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001982{
Juan Quintela52159192013-10-08 12:44:04 +02001983 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001984 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00001985 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001986 switch (size) {
1987 case 1:
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001988 stb_p(qemu_map_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001989 break;
1990 case 2:
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001991 stw_p(qemu_map_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001992 break;
1993 case 4:
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001994 stl_p(qemu_map_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001995 break;
1996 default:
1997 abort();
1998 }
Paolo Bonzini58d27072015-03-23 11:56:01 +01001999 /* Set both VGA and migration bits for simplicity and to remove
2000 * the notdirty callback faster.
2001 */
2002 cpu_physical_memory_set_dirty_range(ram_addr, size,
2003 DIRTY_CLIENTS_NOCODE);
bellardf23db162005-08-21 19:12:28 +00002004 /* we remove the notdirty callback only if the code has been
2005 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02002006 if (!cpu_physical_memory_is_clean(ram_addr)) {
Peter Crosthwaitebcae01e2015-09-10 22:39:42 -07002007 tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
Andreas Färber4917cf42013-05-27 05:17:50 +02002008 }
bellard1ccde1c2004-02-06 19:46:14 +00002009}
2010
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002011static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
2012 unsigned size, bool is_write)
2013{
2014 return is_write;
2015}
2016
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002017static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002018 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002019 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002020 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00002021};
2022
pbrook0f459d12008-06-09 00:20:13 +00002023/* Generate a debug exception if a watchpoint has been hit. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002024static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
pbrook0f459d12008-06-09 00:20:13 +00002025{
Andreas Färber93afead2013-08-26 03:41:01 +02002026 CPUState *cpu = current_cpu;
Sergey Fedorov568496c2016-02-11 11:17:32 +00002027 CPUClass *cc = CPU_GET_CLASS(cpu);
Andreas Färber93afead2013-08-26 03:41:01 +02002028 CPUArchState *env = cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00002029 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00002030 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00002031 CPUWatchpoint *wp;
Emilio G. Cota89fee742016-04-07 13:19:22 -04002032 uint32_t cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00002033
Andreas Färberff4700b2013-08-26 18:23:18 +02002034 if (cpu->watchpoint_hit) {
aliguori06d55cc2008-11-18 20:24:06 +00002035 /* We re-entered the check after replacing the TB. Now raise
2036 * the debug interrupt so that it will trigger after the
2037 * current instruction. */
Andreas Färber93afead2013-08-26 03:41:01 +02002038 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00002039 return;
2040 }
Andreas Färber93afead2013-08-26 03:41:01 +02002041 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Andreas Färberff4700b2013-08-26 18:23:18 +02002042 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Peter Maydell05068c02014-09-12 14:06:48 +01002043 if (cpu_watchpoint_address_matches(wp, vaddr, len)
2044 && (wp->flags & flags)) {
Peter Maydell08225672014-09-12 14:06:48 +01002045 if (flags == BP_MEM_READ) {
2046 wp->flags |= BP_WATCHPOINT_HIT_READ;
2047 } else {
2048 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
2049 }
2050 wp->hitaddr = vaddr;
Peter Maydell66b9b432015-04-26 16:49:24 +01002051 wp->hitattrs = attrs;
Andreas Färberff4700b2013-08-26 18:23:18 +02002052 if (!cpu->watchpoint_hit) {
Sergey Fedorov568496c2016-02-11 11:17:32 +00002053 if (wp->flags & BP_CPU &&
2054 !cc->debug_check_watchpoint(cpu, wp)) {
2055 wp->flags &= ~BP_WATCHPOINT_HIT;
2056 continue;
2057 }
Andreas Färberff4700b2013-08-26 18:23:18 +02002058 cpu->watchpoint_hit = wp;
Andreas Färber239c51a2013-09-01 17:12:23 +02002059 tb_check_watchpoint(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002060 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
Andreas Färber27103422013-08-26 08:31:06 +02002061 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +02002062 cpu_loop_exit(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002063 } else {
2064 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
Andreas Färber648f0342013-09-01 17:43:17 +02002065 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
Peter Maydell6886b982016-05-17 15:18:04 +01002066 cpu_loop_exit_noexc(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002067 }
aliguori06d55cc2008-11-18 20:24:06 +00002068 }
aliguori6e140f22008-11-18 20:37:55 +00002069 } else {
2070 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00002071 }
2072 }
2073}
2074
pbrook6658ffb2007-03-16 23:58:11 +00002075/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2076 so these check for a hit then pass through to the normal out-of-line
2077 phys routines. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002078static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
2079 unsigned size, MemTxAttrs attrs)
pbrook6658ffb2007-03-16 23:58:11 +00002080{
Peter Maydell66b9b432015-04-26 16:49:24 +01002081 MemTxResult res;
2082 uint64_t data;
Peter Maydell79ed0412016-01-21 14:15:06 +00002083 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2084 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
pbrook6658ffb2007-03-16 23:58:11 +00002085
Peter Maydell66b9b432015-04-26 16:49:24 +01002086 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
Avi Kivity1ec9b902012-01-02 12:47:48 +02002087 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04002088 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002089 data = address_space_ldub(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002090 break;
2091 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002092 data = address_space_lduw(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002093 break;
2094 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002095 data = address_space_ldl(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002096 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02002097 default: abort();
2098 }
Peter Maydell66b9b432015-04-26 16:49:24 +01002099 *pdata = data;
2100 return res;
2101}
2102
2103static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
2104 uint64_t val, unsigned size,
2105 MemTxAttrs attrs)
2106{
2107 MemTxResult res;
Peter Maydell79ed0412016-01-21 14:15:06 +00002108 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2109 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
Peter Maydell66b9b432015-04-26 16:49:24 +01002110
2111 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
2112 switch (size) {
2113 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002114 address_space_stb(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002115 break;
2116 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002117 address_space_stw(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002118 break;
2119 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002120 address_space_stl(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002121 break;
2122 default: abort();
2123 }
2124 return res;
pbrook6658ffb2007-03-16 23:58:11 +00002125}
2126
Avi Kivity1ec9b902012-01-02 12:47:48 +02002127static const MemoryRegionOps watch_mem_ops = {
Peter Maydell66b9b432015-04-26 16:49:24 +01002128 .read_with_attrs = watch_mem_read,
2129 .write_with_attrs = watch_mem_write,
Avi Kivity1ec9b902012-01-02 12:47:48 +02002130 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00002131};
pbrook6658ffb2007-03-16 23:58:11 +00002132
Peter Maydellf25a49e2015-04-26 16:49:24 +01002133static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2134 unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002135{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002136 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002137 uint8_t buf[8];
Peter Maydell5c9eb022015-04-26 16:49:24 +01002138 MemTxResult res;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002139
blueswir1db7b5422007-05-26 17:36:03 +00002140#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002141 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002142 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00002143#endif
Peter Maydell5c9eb022015-04-26 16:49:24 +01002144 res = address_space_read(subpage->as, addr + subpage->base,
2145 attrs, buf, len);
2146 if (res) {
2147 return res;
Peter Maydellf25a49e2015-04-26 16:49:24 +01002148 }
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002149 switch (len) {
2150 case 1:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002151 *data = ldub_p(buf);
2152 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002153 case 2:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002154 *data = lduw_p(buf);
2155 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002156 case 4:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002157 *data = ldl_p(buf);
2158 return MEMTX_OK;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002159 case 8:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002160 *data = ldq_p(buf);
2161 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002162 default:
2163 abort();
2164 }
blueswir1db7b5422007-05-26 17:36:03 +00002165}
2166
Peter Maydellf25a49e2015-04-26 16:49:24 +01002167static MemTxResult subpage_write(void *opaque, hwaddr addr,
2168 uint64_t value, unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002169{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002170 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002171 uint8_t buf[8];
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002172
blueswir1db7b5422007-05-26 17:36:03 +00002173#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002174 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002175 " value %"PRIx64"\n",
2176 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00002177#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002178 switch (len) {
2179 case 1:
2180 stb_p(buf, value);
2181 break;
2182 case 2:
2183 stw_p(buf, value);
2184 break;
2185 case 4:
2186 stl_p(buf, value);
2187 break;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002188 case 8:
2189 stq_p(buf, value);
2190 break;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002191 default:
2192 abort();
2193 }
Peter Maydell5c9eb022015-04-26 16:49:24 +01002194 return address_space_write(subpage->as, addr + subpage->base,
2195 attrs, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00002196}
2197
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002198static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08002199 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002200{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002201 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002202#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002203 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002204 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002205#endif
2206
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002207 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08002208 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002209}
2210
Avi Kivity70c68e42012-01-02 12:32:48 +02002211static const MemoryRegionOps subpage_ops = {
Peter Maydellf25a49e2015-04-26 16:49:24 +01002212 .read_with_attrs = subpage_read,
2213 .write_with_attrs = subpage_write,
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002214 .impl.min_access_size = 1,
2215 .impl.max_access_size = 8,
2216 .valid.min_access_size = 1,
2217 .valid.max_access_size = 8,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002218 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02002219 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00002220};
2221
Anthony Liguoric227f092009-10-01 16:12:16 -05002222static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002223 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00002224{
2225 int idx, eidx;
2226
2227 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2228 return -1;
2229 idx = SUBPAGE_IDX(start);
2230 eidx = SUBPAGE_IDX(end);
2231#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002232 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2233 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00002234#endif
blueswir1db7b5422007-05-26 17:36:03 +00002235 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02002236 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00002237 }
2238
2239 return 0;
2240}
2241
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002242static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00002243{
Anthony Liguoric227f092009-10-01 16:12:16 -05002244 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00002245
Anthony Liguori7267c092011-08-20 22:09:37 -05002246 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00002247
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002248 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00002249 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002250 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002251 NULL, TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02002252 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00002253#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002254 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2255 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00002256#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02002257 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00002258
2259 return mmio;
2260}
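/* Illustrative sketch (not part of the original file): how the subpage
 * machinery above is typically exercised.  A subpage_t covers exactly one
 * target page; the dispatch-building code earlier in this file creates it
 * lazily and then records which section owns each byte range.  The base
 * address, offsets and section numbers below are made-up example values.
 */
static void example_split_page(AddressSpace *as, uint16_t mmio_section,
                               uint16_t ram_section)
{
    subpage_t *sp = subpage_init(as, 0x10000000);         /* page base */

    /* first 0x200 bytes of the page backed by MMIO, the rest by RAM */
    subpage_register(sp, 0x000, 0x1ff, mmio_section);
    subpage_register(sp, 0x200, TARGET_PAGE_SIZE - 1, ram_section);
}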
2261
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002262static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2263 MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02002264{
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002265 assert(as);
Avi Kivity5312bd82012-02-12 18:32:55 +02002266 MemoryRegionSection section = {
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002267 .address_space = as,
Avi Kivity5312bd82012-02-12 18:32:55 +02002268 .mr = mr,
2269 .offset_within_address_space = 0,
2270 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002271 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02002272 };
2273
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002274 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02002275}
2276
Peter Maydella54c87b2016-01-21 14:15:05 +00002277MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
Avi Kivityaa102232012-03-08 17:06:55 +02002278{
Peter Maydella54c87b2016-01-21 14:15:05 +00002279 int asidx = cpu_asidx_from_attrs(cpu, attrs);
2280 CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
Peter Maydell32857f42015-10-01 15:29:50 +01002281 AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002282 MemoryRegionSection *sections = d->map.sections;
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002283
2284 return sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02002285}
2286
Avi Kivitye9179ce2009-06-14 11:38:52 +03002287static void io_mem_init(void)
2288{
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002289 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002290 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002291 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002292 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002293 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002294 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002295 NULL, UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03002296}
2297
Avi Kivityac1970f2012-10-03 16:22:53 +02002298static void mem_begin(MemoryListener *listener)
2299{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002300 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002301 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2302 uint16_t n;
2303
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002304 n = dummy_section(&d->map, as, &io_mem_unassigned);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002305 assert(n == PHYS_SECTION_UNASSIGNED);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002306 n = dummy_section(&d->map, as, &io_mem_notdirty);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002307 assert(n == PHYS_SECTION_NOTDIRTY);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002308 n = dummy_section(&d->map, as, &io_mem_rom);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002309 assert(n == PHYS_SECTION_ROM);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002310 n = dummy_section(&d->map, as, &io_mem_watch);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002311 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02002312
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02002313 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02002314 d->as = as;
2315 as->next_dispatch = d;
2316}
2317
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002318static void address_space_dispatch_free(AddressSpaceDispatch *d)
2319{
2320 phys_sections_free(&d->map);
2321 g_free(d);
2322}
2323
Paolo Bonzini00752702013-05-29 12:13:54 +02002324static void mem_commit(MemoryListener *listener)
2325{
2326 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02002327 AddressSpaceDispatch *cur = as->dispatch;
2328 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02002329
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002330 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02002331
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002332 atomic_rcu_set(&as->dispatch, next);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002333 if (cur) {
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002334 call_rcu(cur, address_space_dispatch_free, rcu);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002335 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02002336}
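/* Sketch (not part of the original file): mem_begin()/mem_commit() above are
 * the writer side of a classic RCU publish of the dispatch tree.  A reader
 * that needs a stable view brackets its lookups like this; the helper name
 * is made up for illustration only.
 */
static void example_dispatch_reader(AddressSpace *as)
{
    AddressSpaceDispatch *d;

    rcu_read_lock();
    d = atomic_rcu_read(&as->dispatch);
    /* ... resolve addresses through d->phys_map / d->map.sections here;
     * 'd' may be reclaimed via call_rcu() once the read section ends ... */
    (void)d;
    rcu_read_unlock();
}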
2337
Avi Kivity1d711482012-10-02 18:54:45 +02002338static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02002339{
Peter Maydell32857f42015-10-01 15:29:50 +01002340 CPUAddressSpace *cpuas;
2341 AddressSpaceDispatch *d;
Avi Kivity117712c2012-02-12 21:23:17 +02002342
2343 /* since each CPU stores ram addresses in its TLB cache, we must
2344 reset the modified entries */
Peter Maydell32857f42015-10-01 15:29:50 +01002345 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
2346 cpu_reloading_memory_map();
2347 /* The CPU and TLB are protected by the iothread lock.
2348 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2349 * may have split the RCU critical section.
2350 */
2351 d = atomic_rcu_read(&cpuas->as->dispatch);
2352 cpuas->memory_dispatch = d;
2353 tlb_flush(cpuas->cpu, 1);
Avi Kivity50c1e142012-02-08 21:36:02 +02002354}
2355
Avi Kivityac1970f2012-10-03 16:22:53 +02002356void address_space_init_dispatch(AddressSpace *as)
2357{
Paolo Bonzini00752702013-05-29 12:13:54 +02002358 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002359 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002360 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02002361 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02002362 .region_add = mem_add,
2363 .region_nop = mem_add,
2364 .priority = 0,
2365 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002366 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02002367}
2368
Paolo Bonzini6e48e8f2015-02-10 10:25:44 -07002369void address_space_unregister(AddressSpace *as)
2370{
2371 memory_listener_unregister(&as->dispatch_listener);
2372}
2373
Avi Kivity83f3c252012-10-07 12:59:55 +02002374void address_space_destroy_dispatch(AddressSpace *as)
2375{
2376 AddressSpaceDispatch *d = as->dispatch;
2377
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002378 atomic_rcu_set(&as->dispatch, NULL);
2379 if (d) {
2380 call_rcu(d, address_space_dispatch_free, rcu);
2381 }
Avi Kivity83f3c252012-10-07 12:59:55 +02002382}
2383
Avi Kivity62152b82011-07-26 14:26:14 +03002384static void memory_map_init(void)
2385{
Anthony Liguori7267c092011-08-20 22:09:37 -05002386 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01002387
Paolo Bonzini57271d62013-11-07 17:14:37 +01002388 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002389 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03002390
Anthony Liguori7267c092011-08-20 22:09:37 -05002391 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02002392 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2393 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002394 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity62152b82011-07-26 14:26:14 +03002395}
2396
2397MemoryRegion *get_system_memory(void)
2398{
2399 return system_memory;
2400}
2401
Avi Kivity309cb472011-08-08 16:09:03 +03002402MemoryRegion *get_system_io(void)
2403{
2404 return system_io;
2405}
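/* Sketch (not in the original file): how board or device code typically
 * hangs RAM off the flat view created by memory_map_init() above.  The
 * region name, size and base address are illustrative placeholders, and
 * memory_region_init_ram()/memory_region_add_subregion() are the standard
 * memory API entry points assumed to be available here.
 */
static void example_map_ram(Object *owner)
{
    MemoryRegion *ram = g_new(MemoryRegion, 1);

    memory_region_init_ram(ram, owner, "example.ram", 64 * 1024, &error_fatal);
    memory_region_add_subregion(get_system_memory(), 0x40000000, ram);
}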
2406
pbrooke2eef172008-06-08 01:09:01 +00002407#endif /* !defined(CONFIG_USER_ONLY) */
2408
bellard13eb76e2004-01-24 15:23:36 +00002409/* physical memory access (slow version, mainly for debug) */
2410#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02002411int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00002412 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002413{
2414 int l, flags;
2415 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00002416 void * p;
bellard13eb76e2004-01-24 15:23:36 +00002417
2418 while (len > 0) {
2419 page = addr & TARGET_PAGE_MASK;
2420 l = (page + TARGET_PAGE_SIZE) - addr;
2421 if (l > len)
2422 l = len;
2423 flags = page_get_flags(page);
2424 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00002425 return -1;
bellard13eb76e2004-01-24 15:23:36 +00002426 if (is_write) {
2427 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00002428 return -1;
bellard579a97f2007-11-11 14:26:47 +00002429 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002430 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00002431 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002432 memcpy(p, buf, l);
2433 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00002434 } else {
2435 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00002436 return -1;
bellard579a97f2007-11-11 14:26:47 +00002437 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002438 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00002439 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002440 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00002441 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00002442 }
2443 len -= l;
2444 buf += l;
2445 addr += l;
2446 }
Paul Brooka68fe892010-03-01 00:08:59 +00002447 return 0;
bellard13eb76e2004-01-24 15:23:36 +00002448}
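/* Usage sketch (not part of the original file): a debugger front end (for
 * example the gdbstub) reads guest memory through the helper above.  The
 * wrapper below is illustrative only; is_write == 0 selects a read.
 */
static int example_peek_word(CPUState *cpu, target_ulong addr, uint32_t *out)
{
    return cpu_memory_rw_debug(cpu, addr, (uint8_t *)out, sizeof(*out), 0);
}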
bellard8df1cd02005-01-28 22:37:22 +00002449
bellard13eb76e2004-01-24 15:23:36 +00002450#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002451
Paolo Bonzini845b6212015-03-23 11:45:53 +01002452static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002453 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002454{
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002455 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002456 addr += memory_region_get_ram_addr(mr);
2457
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002458 /* No early return if dirty_log_mask is or becomes 0, because
2459 * cpu_physical_memory_set_dirty_range will still call
2460 * xen_modified_memory.
2461 */
2462 if (dirty_log_mask) {
2463 dirty_log_mask =
2464 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002465 }
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002466 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2467 tb_invalidate_phys_range(addr, addr + length);
2468 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2469 }
2470 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002471}
2472
Richard Henderson23326162013-07-08 14:55:59 -07002473static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02002474{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02002475 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07002476
2477 /* Regions are assumed to support 1-4 byte accesses unless
2478 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07002479 if (access_size_max == 0) {
2480 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002481 }
Richard Henderson23326162013-07-08 14:55:59 -07002482
2483 /* Bound the maximum access by the alignment of the address. */
2484 if (!mr->ops->impl.unaligned) {
2485 unsigned align_size_max = addr & -addr;
2486 if (align_size_max != 0 && align_size_max < access_size_max) {
2487 access_size_max = align_size_max;
2488 }
2489 }
2490
2491 /* Don't attempt accesses larger than the maximum. */
2492 if (l > access_size_max) {
2493 l = access_size_max;
2494 }
Peter Maydell6554f5c2015-07-24 13:33:10 +01002495 l = pow2floor(l);
Richard Henderson23326162013-07-08 14:55:59 -07002496
2497 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002498}
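/* Worked example (added for illustration): with valid.max_access_size == 4
 * and impl.unaligned == false, an 8-byte request at addr 0x1006 is handled
 * as follows: access_size_max starts at 4, the alignment term
 * (0x1006 & -0x1006) == 2 lowers it to 2, so l is clamped from 8 to 2 and
 * pow2floor() leaves it there.  The caller then loops in 2-byte pieces.
 */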
2499
Jan Kiszka4840f102015-06-18 18:47:22 +02002500static bool prepare_mmio_access(MemoryRegion *mr)
Paolo Bonzini125b3802015-06-18 18:47:21 +02002501{
Jan Kiszka4840f102015-06-18 18:47:22 +02002502 bool unlocked = !qemu_mutex_iothread_locked();
2503 bool release_lock = false;
2504
2505 if (unlocked && mr->global_locking) {
2506 qemu_mutex_lock_iothread();
2507 unlocked = false;
2508 release_lock = true;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002509 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002510 if (mr->flush_coalesced_mmio) {
2511 if (unlocked) {
2512 qemu_mutex_lock_iothread();
2513 }
2514 qemu_flush_coalesced_mmio_buffer();
2515 if (unlocked) {
2516 qemu_mutex_unlock_iothread();
2517 }
2518 }
2519
2520 return release_lock;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002521}
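/* Sketch (not in the original file): every MMIO dispatch path below follows
 * the same pattern around prepare_mmio_access(); condensed here purely for
 * illustration.
 */
static MemTxResult example_mmio_store32(MemoryRegion *mr, hwaddr addr,
                                        uint32_t val, MemTxAttrs attrs)
{
    bool release_lock = prepare_mmio_access(mr);
    MemTxResult r = memory_region_dispatch_write(mr, addr, val, 4, attrs);

    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    return r;
}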
2522
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002523/* Called within RCU critical section. */
2524static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
2525 MemTxAttrs attrs,
2526 const uint8_t *buf,
2527 int len, hwaddr addr1,
2528 hwaddr l, MemoryRegion *mr)
bellard13eb76e2004-01-24 15:23:36 +00002529{
bellard13eb76e2004-01-24 15:23:36 +00002530 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002531 uint64_t val;
Peter Maydell3b643492015-04-26 16:49:23 +01002532 MemTxResult result = MEMTX_OK;
Jan Kiszka4840f102015-06-18 18:47:22 +02002533 bool release_lock = false;
ths3b46e622007-09-17 08:09:54 +00002534
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002535 for (;;) {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002536 if (!memory_access_is_direct(mr, true)) {
2537 release_lock |= prepare_mmio_access(mr);
2538 l = memory_access_size(mr, l, addr1);
2539 /* XXX: could force current_cpu to NULL to avoid
2540 potential bugs */
2541 switch (l) {
2542 case 8:
2543 /* 64 bit write access */
2544 val = ldq_p(buf);
2545 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2546 attrs);
2547 break;
2548 case 4:
2549 /* 32 bit write access */
2550 val = ldl_p(buf);
2551 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2552 attrs);
2553 break;
2554 case 2:
2555 /* 16 bit write access */
2556 val = lduw_p(buf);
2557 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2558 attrs);
2559 break;
2560 case 1:
2561 /* 8 bit write access */
2562 val = ldub_p(buf);
2563 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2564 attrs);
2565 break;
2566 default:
2567 abort();
bellard13eb76e2004-01-24 15:23:36 +00002568 }
2569 } else {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002570 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002571 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002572 memcpy(ptr, buf, l);
2573 invalidate_and_set_dirty(mr, addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002574 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002575
2576 if (release_lock) {
2577 qemu_mutex_unlock_iothread();
2578 release_lock = false;
2579 }
2580
bellard13eb76e2004-01-24 15:23:36 +00002581 len -= l;
2582 buf += l;
2583 addr += l;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002584
2585 if (!len) {
2586 break;
2587 }
2588
2589 l = len;
2590 mr = address_space_translate(as, addr, &addr1, &l, true);
bellard13eb76e2004-01-24 15:23:36 +00002591 }
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002592
Peter Maydell3b643492015-04-26 16:49:23 +01002593 return result;
bellard13eb76e2004-01-24 15:23:36 +00002594}
bellard8df1cd02005-01-28 22:37:22 +00002595
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002596MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2597 const uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002598{
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002599 hwaddr l;
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002600 hwaddr addr1;
2601 MemoryRegion *mr;
2602 MemTxResult result = MEMTX_OK;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002603
2604 if (len > 0) {
2605 rcu_read_lock();
2606 l = len;
2607 mr = address_space_translate(as, addr, &addr1, &l, true);
2608 result = address_space_write_continue(as, addr, attrs, buf, len,
2609 addr1, l, mr);
2610 rcu_read_unlock();
2611 }
2612
2613 return result;
2614}
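/* Usage sketch (not part of the original file): how a device model or
 * helper typically drives address_space_write().  The payload and the
 * target address are arbitrary example values.
 */
static bool example_store_buffer(AddressSpace *as, hwaddr addr)
{
    const uint8_t payload[4] = { 0xde, 0xad, 0xbe, 0xef };

    return address_space_write(as, addr, MEMTXATTRS_UNSPECIFIED,
                               payload, sizeof(payload)) == MEMTX_OK;
}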
2615
2616/* Called within RCU critical section. */
2617MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
2618 MemTxAttrs attrs, uint8_t *buf,
2619 int len, hwaddr addr1, hwaddr l,
2620 MemoryRegion *mr)
2621{
2622 uint8_t *ptr;
2623 uint64_t val;
2624 MemTxResult result = MEMTX_OK;
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002625 bool release_lock = false;
2626
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002627 for (;;) {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002628 if (!memory_access_is_direct(mr, false)) {
2629 /* I/O case */
2630 release_lock |= prepare_mmio_access(mr);
2631 l = memory_access_size(mr, l, addr1);
2632 switch (l) {
2633 case 8:
2634 /* 64 bit read access */
2635 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2636 attrs);
2637 stq_p(buf, val);
2638 break;
2639 case 4:
2640 /* 32 bit read access */
2641 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2642 attrs);
2643 stl_p(buf, val);
2644 break;
2645 case 2:
2646 /* 16 bit read access */
2647 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2648 attrs);
2649 stw_p(buf, val);
2650 break;
2651 case 1:
2652 /* 8 bit read access */
2653 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2654 attrs);
2655 stb_p(buf, val);
2656 break;
2657 default:
2658 abort();
2659 }
2660 } else {
2661 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002662 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002663 memcpy(buf, ptr, l);
2664 }
2665
2666 if (release_lock) {
2667 qemu_mutex_unlock_iothread();
2668 release_lock = false;
2669 }
2670
2671 len -= l;
2672 buf += l;
2673 addr += l;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002674
2675 if (!len) {
2676 break;
2677 }
2678
2679 l = len;
2680 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002681 }
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002682
2683 return result;
2684}
2685
Paolo Bonzini3cc8f882015-12-09 10:34:13 +01002686MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
2687 MemTxAttrs attrs, uint8_t *buf, int len)
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002688{
2689 hwaddr l;
2690 hwaddr addr1;
2691 MemoryRegion *mr;
2692 MemTxResult result = MEMTX_OK;
2693
2694 if (len > 0) {
2695 rcu_read_lock();
2696 l = len;
2697 mr = address_space_translate(as, addr, &addr1, &l, false);
2698 result = address_space_read_continue(as, addr, attrs, buf, len,
2699 addr1, l, mr);
2700 rcu_read_unlock();
2701 }
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002702
2703 return result;
Avi Kivityac1970f2012-10-03 16:22:53 +02002704}
2705
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002706MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2707 uint8_t *buf, int len, bool is_write)
2708{
2709 if (is_write) {
2710 return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
2711 } else {
2712 return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
2713 }
2714}
Avi Kivityac1970f2012-10-03 16:22:53 +02002715
Avi Kivitya8170e52012-10-23 12:30:10 +02002716void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002717 int len, int is_write)
2718{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002719 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2720 buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002721}
2722
Alexander Graf582b55a2013-12-11 14:17:44 +01002723enum write_rom_type {
2724 WRITE_DATA,
2725 FLUSH_CACHE,
2726};
2727
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002728static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
Alexander Graf582b55a2013-12-11 14:17:44 +01002729 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
bellardd0ecd2a2006-04-23 17:14:48 +00002730{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002731 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002732 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002733 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002734 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002735
Paolo Bonzini41063e12015-03-18 14:21:43 +01002736 rcu_read_lock();
bellardd0ecd2a2006-04-23 17:14:48 +00002737 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002738 l = len;
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002739 mr = address_space_translate(as, addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002740
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002741 if (!(memory_region_is_ram(mr) ||
2742 memory_region_is_romd(mr))) {
Paolo Bonzinib242e0e2015-07-04 00:24:51 +02002743 l = memory_access_size(mr, l, addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002744 } else {
bellardd0ecd2a2006-04-23 17:14:48 +00002745 /* ROM/RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002746 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf582b55a2013-12-11 14:17:44 +01002747 switch (type) {
2748 case WRITE_DATA:
2749 memcpy(ptr, buf, l);
Paolo Bonzini845b6212015-03-23 11:45:53 +01002750 invalidate_and_set_dirty(mr, addr1, l);
Alexander Graf582b55a2013-12-11 14:17:44 +01002751 break;
2752 case FLUSH_CACHE:
2753 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2754 break;
2755 }
bellardd0ecd2a2006-04-23 17:14:48 +00002756 }
2757 len -= l;
2758 buf += l;
2759 addr += l;
2760 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002761 rcu_read_unlock();
bellardd0ecd2a2006-04-23 17:14:48 +00002762}
2763
Alexander Graf582b55a2013-12-11 14:17:44 +01002764/* used for ROM loading: can write in RAM and ROM */
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002765void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
Alexander Graf582b55a2013-12-11 14:17:44 +01002766 const uint8_t *buf, int len)
2767{
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002768 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
Alexander Graf582b55a2013-12-11 14:17:44 +01002769}
2770
2771void cpu_flush_icache_range(hwaddr start, int len)
2772{
2773 /*
2774 * This function should do the same thing as an icache flush that was
2775 * triggered from within the guest. For TCG we are always cache coherent,
2776 * so there is no need to flush anything. For KVM / Xen we need to flush
2777 * the host's instruction cache at least.
2778 */
2779 if (tcg_enabled()) {
2780 return;
2781 }
2782
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002783 cpu_physical_memory_write_rom_internal(&address_space_memory,
2784 start, NULL, len, FLUSH_CACHE);
Alexander Graf582b55a2013-12-11 14:17:44 +01002785}
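/* Sketch (not in the original file): firmware/ROM loaders use the write_rom
 * variant above so that ROM-protected regions still receive the image and
 * the dirty/code bitmaps stay consistent; flushing the icache afterwards
 * matters for KVM/Xen.  The arguments are whatever the caller supplies.
 */
static void example_load_blob(const uint8_t *blob, int size, hwaddr base)
{
    cpu_physical_memory_write_rom(&address_space_memory, base, blob, size);
    cpu_flush_icache_range(base, size);
}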
2786
aliguori6d16c2f2009-01-22 16:59:11 +00002787typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002788 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002789 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002790 hwaddr addr;
2791 hwaddr len;
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002792 bool in_use;
aliguori6d16c2f2009-01-22 16:59:11 +00002793} BounceBuffer;
2794
2795static BounceBuffer bounce;
2796
aliguoriba223c22009-01-22 16:59:16 +00002797typedef struct MapClient {
Fam Zhenge95205e2015-03-16 17:03:37 +08002798 QEMUBH *bh;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002799 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002800} MapClient;
2801
Fam Zheng38e047b2015-03-16 17:03:35 +08002802QemuMutex map_client_list_lock;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002803static QLIST_HEAD(map_client_list, MapClient) map_client_list
2804 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002805
Fam Zhenge95205e2015-03-16 17:03:37 +08002806static void cpu_unregister_map_client_do(MapClient *client)
aliguoriba223c22009-01-22 16:59:16 +00002807{
Blue Swirl72cf2d42009-09-12 07:36:22 +00002808 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002809 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002810}
2811
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002812static void cpu_notify_map_clients_locked(void)
aliguoriba223c22009-01-22 16:59:16 +00002813{
2814 MapClient *client;
2815
Blue Swirl72cf2d42009-09-12 07:36:22 +00002816 while (!QLIST_EMPTY(&map_client_list)) {
2817 client = QLIST_FIRST(&map_client_list);
Fam Zhenge95205e2015-03-16 17:03:37 +08002818 qemu_bh_schedule(client->bh);
2819 cpu_unregister_map_client_do(client);
aliguoriba223c22009-01-22 16:59:16 +00002820 }
2821}
2822
Fam Zhenge95205e2015-03-16 17:03:37 +08002823void cpu_register_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002824{
2825 MapClient *client = g_malloc(sizeof(*client));
2826
Fam Zheng38e047b2015-03-16 17:03:35 +08002827 qemu_mutex_lock(&map_client_list_lock);
Fam Zhenge95205e2015-03-16 17:03:37 +08002828 client->bh = bh;
bellardd0ecd2a2006-04-23 17:14:48 +00002829 QLIST_INSERT_HEAD(&map_client_list, client, link);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002830 if (!atomic_read(&bounce.in_use)) {
2831 cpu_notify_map_clients_locked();
2832 }
Fam Zheng38e047b2015-03-16 17:03:35 +08002833 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002834}
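/* Sketch (not in the original file): callers of address_space_map() that can
 * lose the race for the single bounce buffer register a bottom half and
 * retry when it fires, which is roughly what the DMA helpers do.  The names
 * below are made up for illustration.
 */
static void example_map_retry_bh(void *opaque)
{
    /* bounce buffer released: retry the mapping from here */
}

static void example_try_map(AddressSpace *as, hwaddr addr, hwaddr *len)
{
    if (!address_space_map(as, addr, len, true)) {
        cpu_register_map_client(qemu_bh_new(example_map_retry_bh, NULL));
    }
}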
2835
Fam Zheng38e047b2015-03-16 17:03:35 +08002836void cpu_exec_init_all(void)
2837{
2838 qemu_mutex_init(&ram_list.mutex);
Fam Zheng38e047b2015-03-16 17:03:35 +08002839 io_mem_init();
Paolo Bonzini680a4782015-11-02 09:23:52 +01002840 memory_map_init();
Fam Zheng38e047b2015-03-16 17:03:35 +08002841 qemu_mutex_init(&map_client_list_lock);
2842}
2843
Fam Zhenge95205e2015-03-16 17:03:37 +08002844void cpu_unregister_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002845{
Fam Zhenge95205e2015-03-16 17:03:37 +08002846 MapClient *client;
bellardd0ecd2a2006-04-23 17:14:48 +00002847
Fam Zhenge95205e2015-03-16 17:03:37 +08002848 qemu_mutex_lock(&map_client_list_lock);
2849 QLIST_FOREACH(client, &map_client_list, link) {
2850 if (client->bh == bh) {
2851 cpu_unregister_map_client_do(client);
2852 break;
2853 }
2854 }
2855 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002856}
2857
2858static void cpu_notify_map_clients(void)
2859{
Fam Zheng38e047b2015-03-16 17:03:35 +08002860 qemu_mutex_lock(&map_client_list_lock);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002861 cpu_notify_map_clients_locked();
Fam Zheng38e047b2015-03-16 17:03:35 +08002862 qemu_mutex_unlock(&map_client_list_lock);
aliguori6d16c2f2009-01-22 16:59:11 +00002863}
2864
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002865bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2866{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002867 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002868 hwaddr l, xlat;
2869
Paolo Bonzini41063e12015-03-18 14:21:43 +01002870 rcu_read_lock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002871 while (len > 0) {
2872 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002873 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2874 if (!memory_access_is_direct(mr, is_write)) {
2875 l = memory_access_size(mr, l, addr);
2876 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002877 return false;
2878 }
2879 }
2880
2881 len -= l;
2882 addr += l;
2883 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002884 rcu_read_unlock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002885 return true;
2886}
2887
aliguori6d16c2f2009-01-22 16:59:11 +00002888/* Map a physical memory region into a host virtual address.
2889 * May map a subset of the requested range, given by and returned in *plen.
2890 * May return NULL if resources needed to perform the mapping are exhausted.
2891 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002892 * Use cpu_register_map_client() to know when retrying the map operation is
2893 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002894 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002895void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002896 hwaddr addr,
2897 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002898 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002899{
Avi Kivitya8170e52012-10-23 12:30:10 +02002900 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002901 hwaddr done = 0;
2902 hwaddr l, xlat, base;
2903 MemoryRegion *mr, *this_mr;
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01002904 void *ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00002905
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002906 if (len == 0) {
2907 return NULL;
2908 }
aliguori6d16c2f2009-01-22 16:59:11 +00002909
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002910 l = len;
Paolo Bonzini41063e12015-03-18 14:21:43 +01002911 rcu_read_lock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002912 mr = address_space_translate(as, addr, &xlat, &l, is_write);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002913
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002914 if (!memory_access_is_direct(mr, is_write)) {
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002915 if (atomic_xchg(&bounce.in_use, true)) {
Paolo Bonzini41063e12015-03-18 14:21:43 +01002916 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002917 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002918 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002919 /* Avoid unbounded allocations */
2920 l = MIN(l, TARGET_PAGE_SIZE);
2921 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002922 bounce.addr = addr;
2923 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002924
2925 memory_region_ref(mr);
2926 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002927 if (!is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002928 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2929 bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002930 }
aliguori6d16c2f2009-01-22 16:59:11 +00002931
Paolo Bonzini41063e12015-03-18 14:21:43 +01002932 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002933 *plen = l;
2934 return bounce.buffer;
2935 }
2936
2937 base = xlat;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002938
2939 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00002940 len -= l;
2941 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002942 done += l;
2943 if (len == 0) {
2944 break;
2945 }
2946
2947 l = len;
2948 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2949 if (this_mr != mr || xlat != base + done) {
2950 break;
2951 }
aliguori6d16c2f2009-01-22 16:59:11 +00002952 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002953
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002954 memory_region_ref(mr);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002955 *plen = done;
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002956 ptr = qemu_ram_ptr_length(mr->ram_block, base, plen);
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01002957 rcu_read_unlock();
2958
2959 return ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00002960}
2961
Avi Kivityac1970f2012-10-03 16:22:53 +02002962/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002963 * Will also mark the memory as dirty if is_write == 1. access_len gives
2964 * the amount of memory that was actually read or written by the caller.
2965 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002966void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2967 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002968{
2969 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002970 MemoryRegion *mr;
2971 ram_addr_t addr1;
2972
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01002973 mr = memory_region_from_host(buffer, &addr1);
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002974 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00002975 if (is_write) {
Paolo Bonzini845b6212015-03-23 11:45:53 +01002976 invalidate_and_set_dirty(mr, addr1, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002977 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002978 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002979 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002980 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002981 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00002982 return;
2983 }
2984 if (is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002985 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
2986 bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002987 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002988 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002989 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002990 memory_region_unref(bounce.mr);
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002991 atomic_mb_set(&bounce.in_use, false);
aliguoriba223c22009-01-22 16:59:16 +00002992 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002993}
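/* Sketch (not in the original file): the canonical map -> access -> unmap
 * sequence for zero-copy DMA.  Error handling is trimmed; checking the
 * returned length matters because address_space_map() may hand back a
 * window shorter than requested.
 */
static void example_dma_fill(AddressSpace *as, hwaddr addr, hwaddr len)
{
    hwaddr mapped = len;
    void *host = address_space_map(as, addr, &mapped, true);

    if (host) {
        memset(host, 0, mapped);                  /* guest-visible write */
        address_space_unmap(as, host, mapped, true, mapped);
    }
}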
bellardd0ecd2a2006-04-23 17:14:48 +00002994
Avi Kivitya8170e52012-10-23 12:30:10 +02002995void *cpu_physical_memory_map(hwaddr addr,
2996 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002997 int is_write)
2998{
2999 return address_space_map(&address_space_memory, addr, plen, is_write);
3000}
3001
Avi Kivitya8170e52012-10-23 12:30:10 +02003002void cpu_physical_memory_unmap(void *buffer, hwaddr len,
3003 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02003004{
3005 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
3006}
3007
bellard8df1cd02005-01-28 22:37:22 +00003008/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003009static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
3010 MemTxAttrs attrs,
3011 MemTxResult *result,
3012 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003013{
bellard8df1cd02005-01-28 22:37:22 +00003014 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02003015 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003016 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003017 hwaddr l = 4;
3018 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003019 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003020 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003021
Paolo Bonzini41063e12015-03-18 14:21:43 +01003022 rcu_read_lock();
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003023 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003024 if (l < 4 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003025 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003026
bellard8df1cd02005-01-28 22:37:22 +00003027 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003028 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003029#if defined(TARGET_WORDS_BIGENDIAN)
3030 if (endian == DEVICE_LITTLE_ENDIAN) {
3031 val = bswap32(val);
3032 }
3033#else
3034 if (endian == DEVICE_BIG_ENDIAN) {
3035 val = bswap32(val);
3036 }
3037#endif
bellard8df1cd02005-01-28 22:37:22 +00003038 } else {
3039 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003040 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003041 switch (endian) {
3042 case DEVICE_LITTLE_ENDIAN:
3043 val = ldl_le_p(ptr);
3044 break;
3045 case DEVICE_BIG_ENDIAN:
3046 val = ldl_be_p(ptr);
3047 break;
3048 default:
3049 val = ldl_p(ptr);
3050 break;
3051 }
Peter Maydell50013112015-04-26 16:49:24 +01003052 r = MEMTX_OK;
3053 }
3054 if (result) {
3055 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003056 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003057 if (release_lock) {
3058 qemu_mutex_unlock_iothread();
3059 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003060 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003061 return val;
3062}
3063
Peter Maydell50013112015-04-26 16:49:24 +01003064uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
3065 MemTxAttrs attrs, MemTxResult *result)
3066{
3067 return address_space_ldl_internal(as, addr, attrs, result,
3068 DEVICE_NATIVE_ENDIAN);
3069}
3070
3071uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
3072 MemTxAttrs attrs, MemTxResult *result)
3073{
3074 return address_space_ldl_internal(as, addr, attrs, result,
3075 DEVICE_LITTLE_ENDIAN);
3076}
3077
3078uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
3079 MemTxAttrs attrs, MemTxResult *result)
3080{
3081 return address_space_ldl_internal(as, addr, attrs, result,
3082 DEVICE_BIG_ENDIAN);
3083}
3084
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003085uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003086{
Peter Maydell50013112015-04-26 16:49:24 +01003087 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003088}
3089
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003090uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003091{
Peter Maydell50013112015-04-26 16:49:24 +01003092 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003093}
3094
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003095uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003096{
Peter Maydell50013112015-04-26 16:49:24 +01003097 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003098}
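/* Usage sketch (not part of the original file): guest page-table or
 * descriptor walkers use the fixed-endian loads above instead of
 * open-coding byte swaps.  The table layout is an arbitrary example.
 */
static uint32_t example_read_le_descriptor(AddressSpace *as, hwaddr table,
                                           int index)
{
    return ldl_le_phys(as, table + index * 4);
}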
3099
bellard84b7b8e2005-11-28 21:19:04 +00003100/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003101static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
3102 MemTxAttrs attrs,
3103 MemTxResult *result,
3104 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00003105{
bellard84b7b8e2005-11-28 21:19:04 +00003106 uint8_t *ptr;
3107 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003108 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003109 hwaddr l = 8;
3110 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003111 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003112 bool release_lock = false;
bellard84b7b8e2005-11-28 21:19:04 +00003113
Paolo Bonzini41063e12015-03-18 14:21:43 +01003114 rcu_read_lock();
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003115 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003116 false);
3117 if (l < 8 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003118 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003119
bellard84b7b8e2005-11-28 21:19:04 +00003120 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003121 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
Paolo Bonzini968a5622013-05-24 17:58:37 +02003122#if defined(TARGET_WORDS_BIGENDIAN)
3123 if (endian == DEVICE_LITTLE_ENDIAN) {
3124 val = bswap64(val);
3125 }
3126#else
3127 if (endian == DEVICE_BIG_ENDIAN) {
3128 val = bswap64(val);
3129 }
3130#endif
bellard84b7b8e2005-11-28 21:19:04 +00003131 } else {
3132 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003133 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003134 switch (endian) {
3135 case DEVICE_LITTLE_ENDIAN:
3136 val = ldq_le_p(ptr);
3137 break;
3138 case DEVICE_BIG_ENDIAN:
3139 val = ldq_be_p(ptr);
3140 break;
3141 default:
3142 val = ldq_p(ptr);
3143 break;
3144 }
Peter Maydell50013112015-04-26 16:49:24 +01003145 r = MEMTX_OK;
3146 }
3147 if (result) {
3148 *result = r;
bellard84b7b8e2005-11-28 21:19:04 +00003149 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003150 if (release_lock) {
3151 qemu_mutex_unlock_iothread();
3152 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003153 rcu_read_unlock();
bellard84b7b8e2005-11-28 21:19:04 +00003154 return val;
3155}
3156
Peter Maydell50013112015-04-26 16:49:24 +01003157uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
3158 MemTxAttrs attrs, MemTxResult *result)
3159{
3160 return address_space_ldq_internal(as, addr, attrs, result,
3161 DEVICE_NATIVE_ENDIAN);
3162}
3163
3164uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
3165 MemTxAttrs attrs, MemTxResult *result)
3166{
3167 return address_space_ldq_internal(as, addr, attrs, result,
3168 DEVICE_LITTLE_ENDIAN);
3169}
3170
3171uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
3172 MemTxAttrs attrs, MemTxResult *result)
3173{
3174 return address_space_ldq_internal(as, addr, attrs, result,
3175 DEVICE_BIG_ENDIAN);
3176}
3177
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003178uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003179{
Peter Maydell50013112015-04-26 16:49:24 +01003180 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003181}
3182
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003183uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003184{
Peter Maydell50013112015-04-26 16:49:24 +01003185 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003186}
3187
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003188uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003189{
Peter Maydell50013112015-04-26 16:49:24 +01003190 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003191}
3192
bellardaab33092005-10-30 20:48:42 +00003193/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003194uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
3195 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003196{
3197 uint8_t val;
Peter Maydell50013112015-04-26 16:49:24 +01003198 MemTxResult r;
3199
3200 r = address_space_rw(as, addr, attrs, &val, 1, 0);
3201 if (result) {
3202 *result = r;
3203 }
bellardaab33092005-10-30 20:48:42 +00003204 return val;
3205}
3206
Peter Maydell50013112015-04-26 16:49:24 +01003207uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
3208{
3209 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3210}
3211
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003212/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003213static inline uint32_t address_space_lduw_internal(AddressSpace *as,
3214 hwaddr addr,
3215 MemTxAttrs attrs,
3216 MemTxResult *result,
3217 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003218{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003219 uint8_t *ptr;
3220 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003221 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003222 hwaddr l = 2;
3223 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003224 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003225 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003226
Paolo Bonzini41063e12015-03-18 14:21:43 +01003227 rcu_read_lock();
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003228 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003229 false);
3230 if (l < 2 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003231 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003232
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003233 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003234 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003235#if defined(TARGET_WORDS_BIGENDIAN)
3236 if (endian == DEVICE_LITTLE_ENDIAN) {
3237 val = bswap16(val);
3238 }
3239#else
3240 if (endian == DEVICE_BIG_ENDIAN) {
3241 val = bswap16(val);
3242 }
3243#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003244 } else {
3245 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003246 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003247 switch (endian) {
3248 case DEVICE_LITTLE_ENDIAN:
3249 val = lduw_le_p(ptr);
3250 break;
3251 case DEVICE_BIG_ENDIAN:
3252 val = lduw_be_p(ptr);
3253 break;
3254 default:
3255 val = lduw_p(ptr);
3256 break;
3257 }
Peter Maydell50013112015-04-26 16:49:24 +01003258 r = MEMTX_OK;
3259 }
3260 if (result) {
3261 *result = r;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003262 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003263 if (release_lock) {
3264 qemu_mutex_unlock_iothread();
3265 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003266 rcu_read_unlock();
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003267 return val;
bellardaab33092005-10-30 20:48:42 +00003268}
3269
Peter Maydell50013112015-04-26 16:49:24 +01003270uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
3271 MemTxAttrs attrs, MemTxResult *result)
3272{
3273 return address_space_lduw_internal(as, addr, attrs, result,
3274 DEVICE_NATIVE_ENDIAN);
3275}
3276
3277uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
3278 MemTxAttrs attrs, MemTxResult *result)
3279{
3280 return address_space_lduw_internal(as, addr, attrs, result,
3281 DEVICE_LITTLE_ENDIAN);
3282}
3283
3284uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
3285 MemTxAttrs attrs, MemTxResult *result)
3286{
3287 return address_space_lduw_internal(as, addr, attrs, result,
3288 DEVICE_BIG_ENDIAN);
3289}
3290
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003291uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003292{
Peter Maydell50013112015-04-26 16:49:24 +01003293 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003294}
3295
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003296uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003297{
Peter Maydell50013112015-04-26 16:49:24 +01003298 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003299}
3300
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003301uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003302{
Peter Maydell50013112015-04-26 16:49:24 +01003303 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003304}
3305
bellard8df1cd02005-01-28 22:37:22 +00003306/* warning: addr must be aligned. The ram page is not marked as dirty
3307 and the code inside is not invalidated. It is useful if the dirty
3308 bits are used to track modified PTEs */
Peter Maydell50013112015-04-26 16:49:24 +01003309void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3310 MemTxAttrs attrs, MemTxResult *result)
bellard8df1cd02005-01-28 22:37:22 +00003311{
bellard8df1cd02005-01-28 22:37:22 +00003312 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003313 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003314 hwaddr l = 4;
3315 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003316 MemTxResult r;
Paolo Bonzini845b6212015-03-23 11:45:53 +01003317 uint8_t dirty_log_mask;
Jan Kiszka4840f102015-06-18 18:47:22 +02003318 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003319
Paolo Bonzini41063e12015-03-18 14:21:43 +01003320 rcu_read_lock();
Edgar E. Iglesias2198a122013-11-28 10:13:41 +01003321 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003322 true);
3323 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003324 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003325
Peter Maydell50013112015-04-26 16:49:24 +01003326 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003327 } else {
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003328 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
bellard8df1cd02005-01-28 22:37:22 +00003329 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003330
Paolo Bonzini845b6212015-03-23 11:45:53 +01003331 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3332 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003333 cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
3334 4, dirty_log_mask);
Peter Maydell50013112015-04-26 16:49:24 +01003335 r = MEMTX_OK;
3336 }
3337 if (result) {
3338 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003339 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003340 if (release_lock) {
3341 qemu_mutex_unlock_iothread();
3342 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003343 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003344}
3345
Peter Maydell50013112015-04-26 16:49:24 +01003346void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3347{
3348 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3349}
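/* Sketch (not in the original file): target MMU helpers update accessed and
 * dirty bits in guest page tables with the _notdirty variant so the store
 * does not set the CODE dirty bit and trigger TB invalidation.  The PTE
 * layout and bit value below are illustrative only.
 */
static void example_set_pte_accessed(AddressSpace *as, hwaddr pte_addr,
                                     uint32_t pte)
{
    stl_phys_notdirty(as, pte_addr, pte | 0x20 /* example ACCESSED bit */);
}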
3350
bellard8df1cd02005-01-28 22:37:22 +00003351/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003352static inline void address_space_stl_internal(AddressSpace *as,
3353 hwaddr addr, uint32_t val,
3354 MemTxAttrs attrs,
3355 MemTxResult *result,
3356 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003357{
bellard8df1cd02005-01-28 22:37:22 +00003358 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003359 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003360 hwaddr l = 4;
3361 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003362 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003363 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003364
Paolo Bonzini41063e12015-03-18 14:21:43 +01003365 rcu_read_lock();
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003366 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003367 true);
3368 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003369 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003370
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003371#if defined(TARGET_WORDS_BIGENDIAN)
3372 if (endian == DEVICE_LITTLE_ENDIAN) {
3373 val = bswap32(val);
3374 }
3375#else
3376 if (endian == DEVICE_BIG_ENDIAN) {
3377 val = bswap32(val);
3378 }
3379#endif
Peter Maydell50013112015-04-26 16:49:24 +01003380 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003381 } else {
bellard8df1cd02005-01-28 22:37:22 +00003382 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003383 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003384 switch (endian) {
3385 case DEVICE_LITTLE_ENDIAN:
3386 stl_le_p(ptr, val);
3387 break;
3388 case DEVICE_BIG_ENDIAN:
3389 stl_be_p(ptr, val);
3390 break;
3391 default:
3392 stl_p(ptr, val);
3393 break;
3394 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003395 invalidate_and_set_dirty(mr, addr1, 4);
Peter Maydell50013112015-04-26 16:49:24 +01003396 r = MEMTX_OK;
bellard8df1cd02005-01-28 22:37:22 +00003397 }
Peter Maydell50013112015-04-26 16:49:24 +01003398 if (result) {
3399 *result = r;
3400 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003401 if (release_lock) {
3402 qemu_mutex_unlock_iothread();
3403 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003404 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003405}
3406
3407void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3408 MemTxAttrs attrs, MemTxResult *result)
3409{
3410 address_space_stl_internal(as, addr, val, attrs, result,
3411 DEVICE_NATIVE_ENDIAN);
3412}
3413
3414void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3415 MemTxAttrs attrs, MemTxResult *result)
3416{
3417 address_space_stl_internal(as, addr, val, attrs, result,
3418 DEVICE_LITTLE_ENDIAN);
3419}
3420
3421void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3422 MemTxAttrs attrs, MemTxResult *result)
3423{
3424 address_space_stl_internal(as, addr, val, attrs, result,
3425 DEVICE_BIG_ENDIAN);
bellard8df1cd02005-01-28 22:37:22 +00003426}
3427
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003428void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003429{
Peter Maydell50013112015-04-26 16:49:24 +01003430 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003431}
3432
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003433void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003434{
Peter Maydell50013112015-04-26 16:49:24 +01003435 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003436}
3437
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003438void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003439{
Peter Maydell50013112015-04-26 16:49:24 +01003440 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003441}
3442
bellardaab33092005-10-30 20:48:42 +00003443/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003444void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3445 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003446{
3447 uint8_t v = val;
Peter Maydell50013112015-04-26 16:49:24 +01003448 MemTxResult r;
3449
3450 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3451 if (result) {
3452 *result = r;
3453 }
3454}
3455
3456void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3457{
3458 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003459}
3460
/* warning: addr must be aligned */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* XXX: optimize */
void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = tswap64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_le64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_be64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

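/*
 * Unlike address_space_stw_internal() above, the 64-bit stores have no
 * direct-RAM fast path yet (hence the "XXX: optimize"): the value is
 * byte-swapped into the requested order on the stack and handed to
 * address_space_rw() as a plain 8-byte buffer.  Illustrative call only
 * (address space and guest address are hypothetical):
 *
 *     address_space_stq_be(&address_space_memory, 0x2000,
 *                          UINT64_C(0x0123456789abcdef),
 *                          MEMTXATTRS_UNSPECIFIED, NULL);
 */
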
void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        int asidx;
        MemTxAttrs attrs;

        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
        asidx = cpu_asidx_from_attrs(cpu, attrs);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
                                          phys_addr, buf, l);
        } else {
            address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
                             MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

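/*
 * Illustrative only: callers such as the gdbstub use this path because
 * it translates one page at a time with cpu_get_phys_page_attrs_debug()
 * and therefore accepts guest virtual addresses.  A hypothetical caller
 * reading a guest word might look like:
 *
 *     uint32_t word;
 *     if (cpu_memory_rw_debug(cpu, guest_vaddr, (uint8_t *)&word,
 *                             sizeof(word), 0) < 0) {
 *         return -1;   // page not mapped
 *     }
 */
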
/*
 * Allows code that needs to deal with migration bitmaps etc to still be built
 * target independent.
 */
size_t qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}

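/*
 * Example (hypothetical caller): target-independent migration code can
 * recover the guest page size without ever seeing TARGET_PAGE_SIZE:
 *
 *     size_t page_size = (size_t)1 << qemu_target_page_bits();
 */
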
#endif

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

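/*
 * Usage note: virtio code that must stay target-independent can call
 * target_words_bigendian() to pick a default guest endianness (an
 * assumption about the callers; this file only provides the helper).
 * The bare declaration immediately above the definition gives the
 * function a visible prototype, since no header exports it.
 */
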
#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}

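/*
 * Illustrative use (hypothetical caller): code that walks guest-physical
 * ranges, e.g. a crash-dump writer, can skip device-backed pages:
 *
 *     if (cpu_physical_memory_is_io(paddr)) {
 *         continue;   // MMIO or unassigned, nothing useful to copy
 *     }
 */
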
int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);
        if (ret) {
            break;
        }
    }
    rcu_read_unlock();
    return ret;
}
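
/*
 * Sketch of a RAMBlockIterFunc callback (hypothetical, for illustration;
 * the exact parameter types are fixed by the RAMBlockIterFunc typedef,
 * assumed here to be const char *, void *, ram_addr_t, ram_addr_t,
 * void *).  Returning non-zero stops the iteration early:
 *
 *     static int count_ram_bytes(const char *idstr, void *host_addr,
 *                                ram_addr_t offset, ram_addr_t length,
 *                                void *opaque)
 *     {
 *         *(uint64_t *)opaque += length;
 *         return 0;
 *     }
 *
 *     uint64_t total = 0;
 *     qemu_ram_foreach_block(count_ram_bytes, &total);
 */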
#endif