/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#ifndef _WIN32
#endif

#include "qemu/cutils.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "hw/qdev-core.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#include "hw/xen/xen.h"
#endif
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "hw/hw.h"
#include "exec/memory.h"
#include "exec/ioport.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "exec/log.h"

#include "migration/vmstate.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
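/* With a typical 12-bit TARGET_PAGE_BITS this works out to
 * ((64 - 12 - 1) / 9) + 1 = 6 levels of 512-entry tables, enough to cover
 * the 52 address bits above the page offset.
 */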

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    static unsigned alloc_hint = 16;
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, alloc_hint);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
        alloc_hint = map->nodes_nb_alloc;
    }
}
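/* alloc_hint above is deliberately static: it remembers how large the node
 * array grew the last time a dispatch map was built, so the next rebuild can
 * reserve roughly the right amount up front instead of growing repeatedly.
 */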

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

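/* Run after the dispatch map has been fully built: collapse chains of
 * single-child nodes so that later lookups can skip whole levels at once
 * via the 'skip' field.
 */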
static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return section->size.hi ||
           range_covers_byte(section->offset_within_address_space,
                             section->size.lo, addr);
}

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = atomic_read(&d->mru_section);
    subpage_t *subpage;
    bool update;

    if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
        section_covers_addr(section, addr)) {
        update = false;
    } else {
        section = phys_page_find(d->phys_map, addr, d->map.nodes,
                                 d->map.sections);
        update = true;
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    if (update) {
        atomic_set(&d->mru_section, section);
    }
    return section;
}
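
/* The mru_section cache above is only an optimisation.  Readers run inside
 * an RCU critical section, so a section seen through atomic_read() cannot be
 * freed underneath them, and a racing atomic_set() at worst means the next
 * lookup takes the slow path again.
 */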

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}
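
/* A sketch of the expected calling pattern (the caller provides the RCU
 * critical section):
 *
 *     rcu_read_lock();
 *     mr = address_space_translate(as, addr, &addr1, &l, is_write);
 *     ... access up to 'l' bytes of 'mr' starting at offset 'addr1' ...
 *     rcu_read_unlock();
 */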

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = cpu->cpu_ases[asidx].memory_dispatch;

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
#endif

#ifndef CONFIG_USER_ONLY
static DECLARE_BITMAP(cpu_index_map, MAX_CPUMASK_BITS);

static int cpu_get_free_index(Error **errp)
{
    int cpu = find_first_zero_bit(cpu_index_map, MAX_CPUMASK_BITS);

    if (cpu >= MAX_CPUMASK_BITS) {
        error_setg(errp, "Trying to use more CPUs than max of %d",
                   MAX_CPUMASK_BITS);
        return -1;
    }

    bitmap_set(cpu_index_map, cpu, 1);
    return cpu;
}

static void cpu_release_index(CPUState *cpu)
{
    bitmap_clear(cpu_index_map, cpu->cpu_index, 1);
}
#else

static int cpu_get_free_index(Error **errp)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

static void cpu_release_index(CPUState *cpu)
{
    return;
}
#endif

void cpu_exec_exit(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cpu_list_lock();
    if (cpu->cpu_index == -1) {
        /* cpu_index was never allocated by this @cpu or was already freed. */
        cpu_list_unlock();
        return;
    }

    QTAILQ_REMOVE(&cpus, cpu, node);
    cpu_release_index(cpu);
    cpu->cpu_index = -1;
    cpu_list_unlock();

    if (cc->vmsd != NULL) {
        vmstate_unregister(NULL, cc->vmsd, cpu);
    }
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
    }
}

void cpu_exec_init(CPUState *cpu, Error **errp)
{
    CPUClass *cc ATTRIBUTE_UNUSED = CPU_GET_CLASS(cpu);
    Error *local_err = NULL;

    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();

    /* This is a softmmu CPU object, so create a property for it
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
                             (Object **)&cpu->memory,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));
#endif

    cpu_list_lock();
    cpu->cpu_index = cpu_get_free_index(&local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        cpu_list_unlock();
        return;
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
    cpu_list_unlock();

#ifndef CONFIG_USER_ONLY
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
    }
#endif
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    MemTxAttrs attrs;
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
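
/* For example, a 4-byte watchpoint at 0x1000 (wpend == 0x1003) matches a
 * 2-byte access at 0x1002 (addrend == 0x1003), but not one at 0x1004.
 */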

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *     rcu_read_lock()
     *     read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *     rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    bool dirty = false;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
                                              offset, num);
        page += num;
    }

    rcu_read_unlock();

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}
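
/* The dirty bitmaps are split into DIRTY_MEMORY_BLOCK_SIZE chunks so that
 * they can be grown under RCU without reallocating and copying one huge
 * bitmap, which is why the loop above recomputes idx/offset/num for each
 * chunk it touches.
 */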

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
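
/* The value returned above encodes two cases: for RAM it is the page-aligned
 * ram_addr_t plus the offset, with one of the small PHYS_SECTION_* indexes
 * ORed into the low bits; for MMIO it is the index of the section within the
 * dispatch map plus the offset.  The softmmu TLB code decodes these again
 * when the access is actually performed.
 */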
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
    qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this. Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}


static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001193 uint16_t section_index = phys_section_add(&d->map, section);
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001194 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1195 TARGET_PAGE_BITS));
Avi Kivitydd811242012-01-02 12:17:03 +02001196
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001197 assert(num_pages);
1198 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
bellard33417e72003-08-10 21:47:01 +00001199}
1200
Avi Kivityac1970f2012-10-03 16:22:53 +02001201static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001202{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001203 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini00752702013-05-29 12:13:54 +02001204 AddressSpaceDispatch *d = as->next_dispatch;
Paolo Bonzini99b9cc02013-05-27 13:18:01 +02001205 MemoryRegionSection now = *section, remain = *section;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001206 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001207
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001208 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1209 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1210 - now.offset_within_address_space;
1211
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001212 now.size = int128_min(int128_make64(left), now.size);
Avi Kivityac1970f2012-10-03 16:22:53 +02001213 register_subpage(d, &now);
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001214 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001215 now.size = int128_zero();
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001216 }
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001217 while (int128_ne(remain.size, now.size)) {
1218 remain.size = int128_sub(remain.size, now.size);
1219 remain.offset_within_address_space += int128_get64(now.size);
1220 remain.offset_within_region += int128_get64(now.size);
Tyler Hall69b67642012-07-25 18:45:04 -04001221 now = remain;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001222 if (int128_lt(remain.size, page_size)) {
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001223 register_subpage(d, &now);
Hu Tao88266242013-08-29 18:21:16 +08001224 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001225 now.size = page_size;
Avi Kivityac1970f2012-10-03 16:22:53 +02001226 register_subpage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001227 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001228 now.size = int128_and(now.size, int128_neg(page_size));
Avi Kivityac1970f2012-10-03 16:22:53 +02001229 register_multipage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001230 }
Avi Kivity0f0cb162012-02-13 17:14:32 +02001231 }
1232}
1233
Sheng Yang62a27442010-01-26 19:21:16 +08001234void qemu_flush_coalesced_mmio_buffer(void)
1235{
1236 if (kvm_enabled())
1237 kvm_flush_coalesced_mmio_buffer();
1238}
1239
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001240void qemu_mutex_lock_ramlist(void)
1241{
1242 qemu_mutex_lock(&ram_list.mutex);
1243}
1244
1245void qemu_mutex_unlock_ramlist(void)
1246{
1247 qemu_mutex_unlock(&ram_list.mutex);
1248}
1249
Markus Armbrustere1e84ba2013-07-31 15:11:10 +02001250#ifdef __linux__
Alex Williamson04b16652010-07-02 11:13:17 -06001251static void *file_ram_alloc(RAMBlock *block,
1252 ram_addr_t memory,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001253 const char *path,
1254 Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001255{
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001256 bool unlink_on_error = false;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001257 char *filename;
Peter Feiner8ca761f2013-03-04 13:54:25 -05001258 char *sanitized_name;
1259 char *c;
Michael S. Tsirkin794e8f32015-09-24 14:41:17 +03001260 void *area;
Paolo Bonzini5c3ece72016-03-17 15:53:13 +01001261 int fd = -1;
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001262 int64_t page_size;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001263
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001264 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1265 error_setg(errp,
1266 "host lacks kvm mmu notifiers, -mem-path unsupported");
1267 return NULL;
1268 }
1269
1270 for (;;) {
1271 fd = open(path, O_RDWR);
1272 if (fd >= 0) {
1273 /* @path names an existing file, use it */
1274 break;
1275 }
1276 if (errno == ENOENT) {
1277 /* @path names a file that doesn't exist, create it */
1278 fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
1279 if (fd >= 0) {
1280 unlink_on_error = true;
1281 break;
1282 }
1283 } else if (errno == EISDIR) {
1284 /* @path names a directory, create a file there */
1285 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1286 sanitized_name = g_strdup(memory_region_name(block->mr));
1287 for (c = sanitized_name; *c != '\0'; c++) {
1288 if (*c == '/') {
1289 *c = '_';
1290 }
1291 }
1292
1293 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1294 sanitized_name);
1295 g_free(sanitized_name);
1296
1297 fd = mkstemp(filename);
1298 if (fd >= 0) {
1299 unlink(filename);
1300 g_free(filename);
1301 break;
1302 }
1303 g_free(filename);
1304 }
1305 if (errno != EEXIST && errno != EINTR) {
1306 error_setg_errno(errp, errno,
1307 "can't open backing store %s for guest RAM",
1308 path);
1309 goto error;
1310 }
1311 /*
1312 * Try again on EINTR and EEXIST. The latter happens when
1313 * something else creates the file between our two open().
1314 */
1315 }
1316
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001317 page_size = qemu_fd_getpagesize(fd);
Dominik Dingeld2f39ad2016-04-25 13:55:38 +02001318 block->mr->align = MAX(page_size, QEMU_VMALLOC_ALIGN);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001319
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001320 if (memory < page_size) {
Hu Tao557529d2014-09-09 13:28:00 +08001321 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001322 "or larger than page size 0x%" PRIx64,
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001323 memory, page_size);
Hu Tao557529d2014-09-09 13:28:00 +08001324 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001325 }
1326
Markus Armbrustere1fb6472016-03-07 20:25:14 +01001327 memory = ROUND_UP(memory, page_size);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001328
1329 /*
1330 * ftruncate is not supported by hugetlbfs in older
1331 * hosts, so don't bother bailing out on errors.
1332 * If anything goes wrong with it under other filesystems,
1333 * mmap will fail.
1334 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001335 if (ftruncate(fd, memory)) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001336 perror("ftruncate");
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001337 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001338
Dominik Dingeld2f39ad2016-04-25 13:55:38 +02001339 area = qemu_ram_mmap(fd, memory, block->mr->align,
1340 block->flags & RAM_SHARED);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001341 if (area == MAP_FAILED) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001342 error_setg_errno(errp, errno,
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001343 "unable to map backing store for guest RAM");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001344 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001345 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001346
1347 if (mem_prealloc) {
Paolo Bonzini38183312014-05-14 17:43:21 +08001348 os_mem_prealloc(fd, area, memory);
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001349 }
1350
Alex Williamson04b16652010-07-02 11:13:17 -06001351 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001352 return area;
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001353
1354error:
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001355 if (unlink_on_error) {
1356 unlink(path);
1357 }
Paolo Bonzini5c3ece72016-03-17 15:53:13 +01001358 if (fd != -1) {
1359 close(fd);
1360 }
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001361 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001362}
1363#endif
1364
Mike Day0dc3f442013-09-05 14:41:35 -04001365/* Called with the ramlist lock held. */
Alex Williamsond17b5282010-06-25 11:08:38 -06001366static ram_addr_t find_ram_offset(ram_addr_t size)
1367{
Alex Williamson04b16652010-07-02 11:13:17 -06001368 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001369 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001370
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001371 assert(size != 0); /* it would hand out same offset multiple times */
1372
Mike Day0dc3f442013-09-05 14:41:35 -04001373 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
Alex Williamson04b16652010-07-02 11:13:17 -06001374 return 0;
Mike Day0d53d9f2015-01-21 13:45:24 +01001375 }
Alex Williamson04b16652010-07-02 11:13:17 -06001376
Mike Day0dc3f442013-09-05 14:41:35 -04001377 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001378 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001379
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001380 end = block->offset + block->max_length;
Alex Williamson04b16652010-07-02 11:13:17 -06001381
Mike Day0dc3f442013-09-05 14:41:35 -04001382 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001383 if (next_block->offset >= end) {
1384 next = MIN(next, next_block->offset);
1385 }
1386 }
1387 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001388 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001389 mingap = next - end;
1390 }
1391 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001392
1393 if (offset == RAM_ADDR_MAX) {
1394 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1395 (uint64_t)size);
1396 abort();
1397 }
1398
Alex Williamson04b16652010-07-02 11:13:17 -06001399 return offset;
1400}
1401
Juan Quintela652d7ec2012-07-20 10:37:54 +02001402ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001403{
Alex Williamsond17b5282010-06-25 11:08:38 -06001404 RAMBlock *block;
1405 ram_addr_t last = 0;
1406
Mike Day0dc3f442013-09-05 14:41:35 -04001407 rcu_read_lock();
1408 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001409 last = MAX(last, block->offset + block->max_length);
Mike Day0d53d9f2015-01-21 13:45:24 +01001410 }
Mike Day0dc3f442013-09-05 14:41:35 -04001411 rcu_read_unlock();
Alex Williamsond17b5282010-06-25 11:08:38 -06001412 return last;
1413}
1414
Jason Baronddb97f12012-08-02 15:44:16 -04001415static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1416{
1417 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001418
1419 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
Marcel Apfelbaum47c8ca52015-02-04 17:43:54 +02001420 if (!machine_dump_guest_core(current_machine)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001421 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1422 if (ret) {
1423 perror("qemu_madvise");
1424 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1425 "but dump_guest_core=off specified\n");
1426 }
1427 }
1428}
1429
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001430const char *qemu_ram_get_idstr(RAMBlock *rb)
1431{
1432 return rb->idstr;
1433}
1434
Mike Dayae3a7042013-09-05 14:41:35 -04001435/* Called with iothread lock held. */
Gongleifa53a0e2016-05-10 10:04:59 +08001436void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
Hu Tao20cfe882014-04-02 15:13:26 +08001437{
Gongleifa53a0e2016-05-10 10:04:59 +08001438 RAMBlock *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001439
Avi Kivityc5705a72011-12-20 15:59:12 +02001440 assert(new_block);
1441 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001442
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001443 if (dev) {
1444 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001445 if (id) {
1446 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001447 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001448 }
1449 }
1450 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1451
Gongleiab0a9952016-05-10 10:05:00 +08001452 rcu_read_lock();
Mike Day0dc3f442013-09-05 14:41:35 -04001453 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Gongleifa53a0e2016-05-10 10:04:59 +08001454 if (block != new_block &&
1455 !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001456 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1457 new_block->idstr);
1458 abort();
1459 }
1460 }
Mike Day0dc3f442013-09-05 14:41:35 -04001461 rcu_read_unlock();
Avi Kivityc5705a72011-12-20 15:59:12 +02001462}
1463
Mike Dayae3a7042013-09-05 14:41:35 -04001464/* Called with iothread lock held. */
Gongleifa53a0e2016-05-10 10:04:59 +08001465void qemu_ram_unset_idstr(RAMBlock *block)
Hu Tao20cfe882014-04-02 15:13:26 +08001466{
Mike Dayae3a7042013-09-05 14:41:35 -04001467 /* FIXME: arch_init.c assumes that this is not called throughout
1468 * migration. Ignore the problem since hot-unplug during migration
1469 * does not work anyway.
1470 */
Hu Tao20cfe882014-04-02 15:13:26 +08001471 if (block) {
1472 memset(block->idstr, 0, sizeof(block->idstr));
1473 }
1474}
1475
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001476static int memory_try_enable_merging(void *addr, size_t len)
1477{
Marcel Apfelbaum75cc7f02015-02-04 17:43:55 +02001478 if (!machine_mem_merge(current_machine)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001479 /* disabled by the user */
1480 return 0;
1481 }
1482
1483 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1484}
1485
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001486/* Only legal before guest might have detected the memory size: e.g. on
1487 * incoming migration, or right after reset.
1488 *
 1489 * As the memory core doesn't know how memory is accessed, it is up to the
1490 * resize callback to update device state and/or add assertions to detect
1491 * misuse, if necessary.
1492 */
Gongleifa53a0e2016-05-10 10:04:59 +08001493int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001494{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001495 assert(block);
1496
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001497 newsize = HOST_PAGE_ALIGN(newsize);
Michael S. Tsirkin129ddaf2015-02-17 10:15:30 +01001498
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001499 if (block->used_length == newsize) {
1500 return 0;
1501 }
1502
1503 if (!(block->flags & RAM_RESIZEABLE)) {
1504 error_setg_errno(errp, EINVAL,
1505 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1506 " in != 0x" RAM_ADDR_FMT, block->idstr,
1507 newsize, block->used_length);
1508 return -EINVAL;
1509 }
1510
1511 if (block->max_length < newsize) {
1512 error_setg_errno(errp, EINVAL,
1513 "Length too large: %s: 0x" RAM_ADDR_FMT
1514 " > 0x" RAM_ADDR_FMT, block->idstr,
1515 newsize, block->max_length);
1516 return -EINVAL;
1517 }
1518
1519 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1520 block->used_length = newsize;
Paolo Bonzini58d27072015-03-23 11:56:01 +01001521 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1522 DIRTY_CLIENTS_ALL);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001523 memory_region_set_size(block->mr, newsize);
1524 if (block->resized) {
1525 block->resized(block->idstr, newsize, block->host);
1526 }
1527 return 0;
1528}
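/* Editor's note (illustrative sketch, not part of the original source): a
 * caller that owns a RAM_RESIZEABLE block (see qemu_ram_alloc_resizeable()
 * below) grows it on incoming migration roughly like this; "block" and
 * "new_size" are assumed to come from the caller:
 *
 *     Error *err = NULL;
 *     if (qemu_ram_resize(block, new_size, &err) < 0) {
 *         error_report_err(err);
 *     }
 *
 * The call fails for fixed-size blocks and for sizes above max_length.
 */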
1529
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001530/* Called with ram_list.mutex held */
1531static void dirty_memory_extend(ram_addr_t old_ram_size,
1532 ram_addr_t new_ram_size)
1533{
1534 ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
1535 DIRTY_MEMORY_BLOCK_SIZE);
1536 ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
1537 DIRTY_MEMORY_BLOCK_SIZE);
1538 int i;
1539
1540 /* Only need to extend if block count increased */
1541 if (new_num_blocks <= old_num_blocks) {
1542 return;
1543 }
1544
1545 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1546 DirtyMemoryBlocks *old_blocks;
1547 DirtyMemoryBlocks *new_blocks;
1548 int j;
1549
1550 old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
1551 new_blocks = g_malloc(sizeof(*new_blocks) +
1552 sizeof(new_blocks->blocks[0]) * new_num_blocks);
1553
1554 if (old_num_blocks) {
1555 memcpy(new_blocks->blocks, old_blocks->blocks,
1556 old_num_blocks * sizeof(old_blocks->blocks[0]));
1557 }
1558
1559 for (j = old_num_blocks; j < new_num_blocks; j++) {
1560 new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
1561 }
1562
1563 atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
1564
1565 if (old_blocks) {
1566 g_free_rcu(old_blocks, rcu);
1567 }
1568 }
1569}
1570
Fam Zheng528f46a2016-03-01 14:18:18 +08001571static void ram_block_add(RAMBlock *new_block, Error **errp)
Avi Kivityc5705a72011-12-20 15:59:12 +02001572{
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001573 RAMBlock *block;
Mike Day0d53d9f2015-01-21 13:45:24 +01001574 RAMBlock *last_block = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001575 ram_addr_t old_ram_size, new_ram_size;
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001576 Error *err = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001577
1578 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001579
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001580 qemu_mutex_lock_ramlist();
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001581 new_block->offset = find_ram_offset(new_block->max_length);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001582
1583 if (!new_block->host) {
1584 if (xen_enabled()) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001585 xen_ram_alloc(new_block->offset, new_block->max_length,
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001586 new_block->mr, &err);
1587 if (err) {
1588 error_propagate(errp, err);
1589 qemu_mutex_unlock_ramlist();
Paolo Bonzini39c350e2016-03-09 18:14:01 +01001590 return;
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001591 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001592 } else {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001593 new_block->host = phys_mem_alloc(new_block->max_length,
Igor Mammedova2b257d2014-10-31 16:38:37 +00001594 &new_block->mr->align);
Markus Armbruster39228252013-07-31 15:11:11 +02001595 if (!new_block->host) {
Hu Taoef701d72014-09-09 13:27:54 +08001596 error_setg_errno(errp, errno,
1597 "cannot set up guest memory '%s'",
1598 memory_region_name(new_block->mr));
1599 qemu_mutex_unlock_ramlist();
Paolo Bonzini39c350e2016-03-09 18:14:01 +01001600 return;
Markus Armbruster39228252013-07-31 15:11:11 +02001601 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001602 memory_try_enable_merging(new_block->host, new_block->max_length);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001603 }
1604 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001605
Li Zhijiandd631692015-07-02 20:18:06 +08001606 new_ram_size = MAX(old_ram_size,
1607 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1608 if (new_ram_size > old_ram_size) {
1609 migration_bitmap_extend(old_ram_size, new_ram_size);
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001610 dirty_memory_extend(old_ram_size, new_ram_size);
Li Zhijiandd631692015-07-02 20:18:06 +08001611 }
Mike Day0d53d9f2015-01-21 13:45:24 +01001612 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1613 * QLIST (which has an RCU-friendly variant) does not have insertion at
1614 * tail, so save the last element in last_block.
1615 */
Mike Day0dc3f442013-09-05 14:41:35 -04001616 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Mike Day0d53d9f2015-01-21 13:45:24 +01001617 last_block = block;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001618 if (block->max_length < new_block->max_length) {
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001619 break;
1620 }
1621 }
1622 if (block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001623 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001624 } else if (last_block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001625 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001626 } else { /* list is empty */
Mike Day0dc3f442013-09-05 14:41:35 -04001627 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001628 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001629 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001630
Mike Day0dc3f442013-09-05 14:41:35 -04001631 /* Write list before version */
1632 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001633 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001634 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001635
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001636 cpu_physical_memory_set_dirty_range(new_block->offset,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001637 new_block->used_length,
1638 DIRTY_CLIENTS_ALL);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001639
Paolo Bonzinia904c912015-01-21 16:18:35 +01001640 if (new_block->host) {
1641 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1642 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1643 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1644 if (kvm_enabled()) {
1645 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1646 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001647 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001648}
1649
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001650#ifdef __linux__
Fam Zheng528f46a2016-03-01 14:18:18 +08001651RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
1652 bool share, const char *mem_path,
1653 Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001654{
1655 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001656 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001657
1658 if (xen_enabled()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001659 error_setg(errp, "-mem-path not supported with Xen");
Fam Zheng528f46a2016-03-01 14:18:18 +08001660 return NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001661 }
1662
1663 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1664 /*
1665 * file_ram_alloc() needs to allocate just like
1666 * phys_mem_alloc, but we haven't bothered to provide
1667 * a hook there.
1668 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001669 error_setg(errp,
1670 "-mem-path not supported with this accelerator");
Fam Zheng528f46a2016-03-01 14:18:18 +08001671 return NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001672 }
1673
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001674 size = HOST_PAGE_ALIGN(size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001675 new_block = g_malloc0(sizeof(*new_block));
1676 new_block->mr = mr;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001677 new_block->used_length = size;
1678 new_block->max_length = size;
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001679 new_block->flags = share ? RAM_SHARED : 0;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001680 new_block->host = file_ram_alloc(new_block, size,
1681 mem_path, errp);
1682 if (!new_block->host) {
1683 g_free(new_block);
Fam Zheng528f46a2016-03-01 14:18:18 +08001684 return NULL;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001685 }
1686
Fam Zheng528f46a2016-03-01 14:18:18 +08001687 ram_block_add(new_block, &local_err);
Hu Taoef701d72014-09-09 13:27:54 +08001688 if (local_err) {
1689 g_free(new_block);
1690 error_propagate(errp, local_err);
Fam Zheng528f46a2016-03-01 14:18:18 +08001691 return NULL;
Hu Taoef701d72014-09-09 13:27:54 +08001692 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001693 return new_block;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001694}
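/* Editor's note (illustrative sketch, not part of the original source): this
 * is the path taken for -mem-path/hugepage-backed RAM.  A caller that already
 * owns a MemoryRegion "mr" could back 1 GiB of guest RAM with a hugetlbfs
 * mount (the path is only an example):
 *
 *     Error *err = NULL;
 *     RAMBlock *rb = qemu_ram_alloc_from_file(1ULL << 30, mr, true,
 *                                             "/dev/hugepages", &err);
 *     if (!rb) {
 *         error_report_err(err);
 *     }
 */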
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001695#endif
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001696
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001697static
Fam Zheng528f46a2016-03-01 14:18:18 +08001698RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1699 void (*resized)(const char*,
1700 uint64_t length,
1701 void *host),
1702 void *host, bool resizeable,
1703 MemoryRegion *mr, Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001704{
1705 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001706 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001707
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001708 size = HOST_PAGE_ALIGN(size);
1709 max_size = HOST_PAGE_ALIGN(max_size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001710 new_block = g_malloc0(sizeof(*new_block));
1711 new_block->mr = mr;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001712 new_block->resized = resized;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001713 new_block->used_length = size;
1714 new_block->max_length = max_size;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001715 assert(max_size >= size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001716 new_block->fd = -1;
1717 new_block->host = host;
1718 if (host) {
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001719 new_block->flags |= RAM_PREALLOC;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001720 }
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001721 if (resizeable) {
1722 new_block->flags |= RAM_RESIZEABLE;
1723 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001724 ram_block_add(new_block, &local_err);
Hu Taoef701d72014-09-09 13:27:54 +08001725 if (local_err) {
1726 g_free(new_block);
1727 error_propagate(errp, local_err);
Fam Zheng528f46a2016-03-01 14:18:18 +08001728 return NULL;
Hu Taoef701d72014-09-09 13:27:54 +08001729 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001730 return new_block;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001731}
1732
Fam Zheng528f46a2016-03-01 14:18:18 +08001733RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001734 MemoryRegion *mr, Error **errp)
1735{
1736 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1737}
1738
Fam Zheng528f46a2016-03-01 14:18:18 +08001739RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
pbrook94a6b542009-04-11 17:15:54 +00001740{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001741 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1742}
1743
Fam Zheng528f46a2016-03-01 14:18:18 +08001744RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001745 void (*resized)(const char*,
1746 uint64_t length,
1747 void *host),
1748 MemoryRegion *mr, Error **errp)
1749{
1750 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
pbrook94a6b542009-04-11 17:15:54 +00001751}
bellarde9a1ab12007-02-08 23:08:38 +00001752
Paolo Bonzini43771532013-09-09 17:58:40 +02001753static void reclaim_ramblock(RAMBlock *block)
1754{
1755 if (block->flags & RAM_PREALLOC) {
1756 ;
1757 } else if (xen_enabled()) {
1758 xen_invalidate_map_cache_entry(block->host);
1759#ifndef _WIN32
1760 } else if (block->fd >= 0) {
Eduardo Habkost2f3a2bb2015-11-06 20:11:21 -02001761 qemu_ram_munmap(block->host, block->max_length);
Paolo Bonzini43771532013-09-09 17:58:40 +02001762 close(block->fd);
1763#endif
1764 } else {
1765 qemu_anon_ram_free(block->host, block->max_length);
1766 }
1767 g_free(block);
1768}
1769
Fam Zhengf1060c52016-03-01 14:18:22 +08001770void qemu_ram_free(RAMBlock *block)
bellarde9a1ab12007-02-08 23:08:38 +00001771{
Marc-André Lureau85bc2a12016-03-29 13:20:51 +02001772 if (!block) {
1773 return;
1774 }
1775
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001776 qemu_mutex_lock_ramlist();
Fam Zhengf1060c52016-03-01 14:18:22 +08001777 QLIST_REMOVE_RCU(block, next);
1778 ram_list.mru_block = NULL;
1779 /* Write list before version */
1780 smp_wmb();
1781 ram_list.version++;
1782 call_rcu(block, reclaim_ramblock, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001783 qemu_mutex_unlock_ramlist();
bellarde9a1ab12007-02-08 23:08:38 +00001784}
1785
Huang Yingcd19cfa2011-03-02 08:56:19 +01001786#ifndef _WIN32
1787void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1788{
1789 RAMBlock *block;
1790 ram_addr_t offset;
1791 int flags;
1792 void *area, *vaddr;
1793
Mike Day0dc3f442013-09-05 14:41:35 -04001794 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001795 offset = addr - block->offset;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001796 if (offset < block->max_length) {
Michael S. Tsirkin1240be22014-11-12 11:44:41 +02001797 vaddr = ramblock_ptr(block, offset);
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001798 if (block->flags & RAM_PREALLOC) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001799 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001800 } else if (xen_enabled()) {
1801 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001802 } else {
1803 flags = MAP_FIXED;
Markus Armbruster3435f392013-07-31 15:11:07 +02001804 if (block->fd >= 0) {
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001805 flags |= (block->flags & RAM_SHARED ?
1806 MAP_SHARED : MAP_PRIVATE);
Markus Armbruster3435f392013-07-31 15:11:07 +02001807 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1808 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001809 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001810 /*
1811 * Remap needs to match alloc. Accelerators that
1812 * set phys_mem_alloc never remap. If they did,
1813 * we'd need a remap hook here.
1814 */
1815 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1816
Huang Yingcd19cfa2011-03-02 08:56:19 +01001817 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1818 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1819 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001820 }
1821 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001822 fprintf(stderr, "Could not remap addr: "
1823 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001824 length, addr);
1825 exit(1);
1826 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001827 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001828 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001829 }
Huang Yingcd19cfa2011-03-02 08:56:19 +01001830 }
1831 }
1832}
1833#endif /* !_WIN32 */
1834
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001835/* Return a host pointer to ram allocated with qemu_ram_alloc.
Mike Dayae3a7042013-09-05 14:41:35 -04001836 * This should not be used for general purpose DMA. Use address_space_map
1837 * or address_space_rw instead. For local memory (e.g. video ram) that the
1838 * device owns, use memory_region_get_ram_ptr.
Mike Day0dc3f442013-09-05 14:41:35 -04001839 *
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001840 * Called within RCU critical section.
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001841 */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001842void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001843{
Gonglei3655cb92016-02-20 10:35:20 +08001844 RAMBlock *block = ram_block;
1845
1846 if (block == NULL) {
1847 block = qemu_get_ram_block(addr);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001848 addr -= block->offset;
Gonglei3655cb92016-02-20 10:35:20 +08001849 }
Mike Dayae3a7042013-09-05 14:41:35 -04001850
1851 if (xen_enabled() && block->host == NULL) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001852 /* We need to check if the requested address is in the RAM
1853 * because we don't want to map the entire memory in QEMU.
1854 * In that case just map until the end of the page.
1855 */
1856 if (block->offset == 0) {
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001857 return xen_map_cache(addr, 0, 0);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001858 }
Mike Dayae3a7042013-09-05 14:41:35 -04001859
1860 block->host = xen_map_cache(block->offset, block->max_length, 1);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001861 }
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001862 return ramblock_ptr(block, addr);
pbrookdc828ca2009-04-09 22:21:07 +00001863}
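/* Editor's note (illustrative sketch, not part of the original source): per
 * the comment above, direct users of this helper stay inside an RCU critical
 * section (or hold another reference that keeps the block alive) for as long
 * as they dereference the returned pointer:
 *
 *     rcu_read_lock();
 *     void *host = qemu_map_ram_ptr(block, offset);
 *     ... use host ...
 *     rcu_read_unlock();
 */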
1864
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001865/* Return a host pointer to guest's ram. Similar to qemu_map_ram_ptr
Mike Dayae3a7042013-09-05 14:41:35 -04001866 * but takes a size argument.
Mike Day0dc3f442013-09-05 14:41:35 -04001867 *
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001868 * Called within RCU critical section.
Mike Dayae3a7042013-09-05 14:41:35 -04001869 */
Gonglei3655cb92016-02-20 10:35:20 +08001870static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
1871 hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001872{
Gonglei3655cb92016-02-20 10:35:20 +08001873 RAMBlock *block = ram_block;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001874 if (*size == 0) {
1875 return NULL;
1876 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001877
Gonglei3655cb92016-02-20 10:35:20 +08001878 if (block == NULL) {
1879 block = qemu_get_ram_block(addr);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001880 addr -= block->offset;
Gonglei3655cb92016-02-20 10:35:20 +08001881 }
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001882 *size = MIN(*size, block->max_length - addr);
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001883
1884 if (xen_enabled() && block->host == NULL) {
1885 /* We need to check if the requested address is in the RAM
1886 * because we don't want to map the entire memory in QEMU.
1887 * In that case just map the requested area.
1888 */
1889 if (block->offset == 0) {
1890 return xen_map_cache(addr, *size, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001891 }
1892
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001893 block->host = xen_map_cache(block->offset, block->max_length, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001894 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001895
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001896 return ramblock_ptr(block, addr);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001897}
1898
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001899/*
 1900 * Translates a host ptr back to a RAMBlock and an offset in that
 1901 * RAMBlock.
 1902 *
 1903 * ptr: Host pointer to look up
 1904 * round_offset: If true round the result offset down to a page
 1905 * boundary
 1906 * *offset: set to result offset within the RAMBlock
1907 *
1908 * Returns: RAMBlock (or NULL if not found)
Mike Dayae3a7042013-09-05 14:41:35 -04001909 *
1910 * By the time this function returns, the returned pointer is not protected
1911 * by RCU anymore. If the caller is not within an RCU critical section and
1912 * does not hold the iothread lock, it must have other means of protecting the
1913 * pointer, such as a reference to the region that includes the incoming
1914 * ram_addr_t.
1915 */
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001916RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001917 ram_addr_t *offset)
pbrook5579c7f2009-04-11 14:47:08 +00001918{
pbrook94a6b542009-04-11 17:15:54 +00001919 RAMBlock *block;
1920 uint8_t *host = ptr;
1921
Jan Kiszka868bb332011-06-21 22:59:09 +02001922 if (xen_enabled()) {
Paolo Bonzinif615f392016-05-26 10:07:50 +02001923 ram_addr_t ram_addr;
Mike Day0dc3f442013-09-05 14:41:35 -04001924 rcu_read_lock();
Paolo Bonzinif615f392016-05-26 10:07:50 +02001925 ram_addr = xen_ram_addr_from_mapcache(ptr);
1926 block = qemu_get_ram_block(ram_addr);
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001927 if (block) {
Anthony PERARDd6b6aec2016-06-09 16:56:17 +01001928 *offset = ram_addr - block->offset;
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001929 }
Mike Day0dc3f442013-09-05 14:41:35 -04001930 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001931 return block;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001932 }
1933
Mike Day0dc3f442013-09-05 14:41:35 -04001934 rcu_read_lock();
1935 block = atomic_rcu_read(&ram_list.mru_block);
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001936 if (block && block->host && host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001937 goto found;
1938 }
1939
Mike Day0dc3f442013-09-05 14:41:35 -04001940 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001941 /* This case happens when the block is not mapped. */
1942 if (block->host == NULL) {
1943 continue;
1944 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001945 if (host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001946 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001947 }
pbrook94a6b542009-04-11 17:15:54 +00001948 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001949
Mike Day0dc3f442013-09-05 14:41:35 -04001950 rcu_read_unlock();
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001951 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001952
1953found:
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001954 *offset = (host - block->host);
1955 if (round_offset) {
1956 *offset &= TARGET_PAGE_MASK;
1957 }
Mike Day0dc3f442013-09-05 14:41:35 -04001958 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001959 return block;
1960}
1961
Dr. David Alan Gilberte3dd7492015-11-05 18:10:33 +00001962/*
1963 * Finds the named RAMBlock
1964 *
1965 * name: The name of RAMBlock to find
1966 *
1967 * Returns: RAMBlock (or NULL if not found)
1968 */
1969RAMBlock *qemu_ram_block_by_name(const char *name)
1970{
1971 RAMBlock *block;
1972
1973 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1974 if (!strcmp(name, block->idstr)) {
1975 return block;
1976 }
1977 }
1978
1979 return NULL;
1980}
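/* Editor's note (illustrative sketch, not part of the original source):
 * migration code matches an incoming block name against the local list with
 * this lookup; "pc.ram" is only an example idstr:
 *
 *     RAMBlock *rb = qemu_ram_block_by_name("pc.ram");
 *     if (!rb) {
 *         error_report("unknown RAM block '%s'", "pc.ram");
 *     }
 */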
1981
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001982/* Some of the softmmu routines need to translate from a host pointer
1983 (typically a TLB entry) back to a ram offset. */
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01001984ram_addr_t qemu_ram_addr_from_host(void *ptr)
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001985{
1986 RAMBlock *block;
Paolo Bonzinif615f392016-05-26 10:07:50 +02001987 ram_addr_t offset;
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001988
Paolo Bonzinif615f392016-05-26 10:07:50 +02001989 block = qemu_ram_block_from_host(ptr, false, &offset);
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001990 if (!block) {
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01001991 return RAM_ADDR_INVALID;
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001992 }
1993
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01001994 return block->offset + offset;
Marcelo Tosattie8902612010-10-11 15:31:19 -03001995}
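/* Editor's note (illustrative sketch, not part of the original source): the
 * softmmu slow path uses this to turn a host pointer from a TLB entry back
 * into a ram_addr_t; RAM_ADDR_INVALID signals the pointer is not guest RAM:
 *
 *     ram_addr_t ram_addr = qemu_ram_addr_from_host(ptr);
 *     if (ram_addr == RAM_ADDR_INVALID) {
 *         ... not backed by a RAMBlock, treat as MMIO or report an error ...
 *     }
 */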
Alex Williamsonf471a172010-06-11 11:11:42 -06001996
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001997/* Called within RCU critical section. */
Avi Kivitya8170e52012-10-23 12:30:10 +02001998static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001999 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00002000{
Juan Quintela52159192013-10-08 12:44:04 +02002001 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002002 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00002003 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002004 switch (size) {
2005 case 1:
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002006 stb_p(qemu_map_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002007 break;
2008 case 2:
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002009 stw_p(qemu_map_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002010 break;
2011 case 4:
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002012 stl_p(qemu_map_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002013 break;
2014 default:
2015 abort();
2016 }
Paolo Bonzini58d27072015-03-23 11:56:01 +01002017 /* Set both VGA and migration bits for simplicity and to remove
2018 * the notdirty callback faster.
2019 */
2020 cpu_physical_memory_set_dirty_range(ram_addr, size,
2021 DIRTY_CLIENTS_NOCODE);
bellardf23db162005-08-21 19:12:28 +00002022 /* we remove the notdirty callback only if the code has been
2023 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02002024 if (!cpu_physical_memory_is_clean(ram_addr)) {
Peter Crosthwaitebcae01e2015-09-10 22:39:42 -07002025 tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
Andreas Färber4917cf42013-05-27 05:17:50 +02002026 }
bellard1ccde1c2004-02-06 19:46:14 +00002027}
2028
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002029static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
2030 unsigned size, bool is_write)
2031{
2032 return is_write;
2033}
2034
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002035static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002036 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002037 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002038 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00002039};
2040
pbrook0f459d12008-06-09 00:20:13 +00002041/* Generate a debug exception if a watchpoint has been hit. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002042static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
pbrook0f459d12008-06-09 00:20:13 +00002043{
Andreas Färber93afead2013-08-26 03:41:01 +02002044 CPUState *cpu = current_cpu;
Sergey Fedorov568496c2016-02-11 11:17:32 +00002045 CPUClass *cc = CPU_GET_CLASS(cpu);
Andreas Färber93afead2013-08-26 03:41:01 +02002046 CPUArchState *env = cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00002047 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00002048 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00002049 CPUWatchpoint *wp;
Emilio G. Cota89fee742016-04-07 13:19:22 -04002050 uint32_t cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00002051
Andreas Färberff4700b2013-08-26 18:23:18 +02002052 if (cpu->watchpoint_hit) {
aliguori06d55cc2008-11-18 20:24:06 +00002053 /* We re-entered the check after replacing the TB. Now raise
 2054 * the debug interrupt so that it will trigger after the
2055 * current instruction. */
Andreas Färber93afead2013-08-26 03:41:01 +02002056 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00002057 return;
2058 }
Andreas Färber93afead2013-08-26 03:41:01 +02002059 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Andreas Färberff4700b2013-08-26 18:23:18 +02002060 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Peter Maydell05068c02014-09-12 14:06:48 +01002061 if (cpu_watchpoint_address_matches(wp, vaddr, len)
2062 && (wp->flags & flags)) {
Peter Maydell08225672014-09-12 14:06:48 +01002063 if (flags == BP_MEM_READ) {
2064 wp->flags |= BP_WATCHPOINT_HIT_READ;
2065 } else {
2066 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
2067 }
2068 wp->hitaddr = vaddr;
Peter Maydell66b9b432015-04-26 16:49:24 +01002069 wp->hitattrs = attrs;
Andreas Färberff4700b2013-08-26 18:23:18 +02002070 if (!cpu->watchpoint_hit) {
Sergey Fedorov568496c2016-02-11 11:17:32 +00002071 if (wp->flags & BP_CPU &&
2072 !cc->debug_check_watchpoint(cpu, wp)) {
2073 wp->flags &= ~BP_WATCHPOINT_HIT;
2074 continue;
2075 }
Andreas Färberff4700b2013-08-26 18:23:18 +02002076 cpu->watchpoint_hit = wp;
Andreas Färber239c51a2013-09-01 17:12:23 +02002077 tb_check_watchpoint(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002078 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
Andreas Färber27103422013-08-26 08:31:06 +02002079 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +02002080 cpu_loop_exit(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002081 } else {
2082 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
Andreas Färber648f0342013-09-01 17:43:17 +02002083 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
Peter Maydell6886b982016-05-17 15:18:04 +01002084 cpu_loop_exit_noexc(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002085 }
aliguori06d55cc2008-11-18 20:24:06 +00002086 }
aliguori6e140f22008-11-18 20:37:55 +00002087 } else {
2088 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00002089 }
2090 }
2091}
2092
pbrook6658ffb2007-03-16 23:58:11 +00002093/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2094 so these check for a hit then pass through to the normal out-of-line
2095 phys routines. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002096static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
2097 unsigned size, MemTxAttrs attrs)
pbrook6658ffb2007-03-16 23:58:11 +00002098{
Peter Maydell66b9b432015-04-26 16:49:24 +01002099 MemTxResult res;
2100 uint64_t data;
Peter Maydell79ed0412016-01-21 14:15:06 +00002101 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2102 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
pbrook6658ffb2007-03-16 23:58:11 +00002103
Peter Maydell66b9b432015-04-26 16:49:24 +01002104 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
Avi Kivity1ec9b902012-01-02 12:47:48 +02002105 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04002106 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002107 data = address_space_ldub(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002108 break;
2109 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002110 data = address_space_lduw(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002111 break;
2112 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002113 data = address_space_ldl(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002114 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02002115 default: abort();
2116 }
Peter Maydell66b9b432015-04-26 16:49:24 +01002117 *pdata = data;
2118 return res;
2119}
2120
2121static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
2122 uint64_t val, unsigned size,
2123 MemTxAttrs attrs)
2124{
2125 MemTxResult res;
Peter Maydell79ed0412016-01-21 14:15:06 +00002126 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2127 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
Peter Maydell66b9b432015-04-26 16:49:24 +01002128
2129 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
2130 switch (size) {
2131 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002132 address_space_stb(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002133 break;
2134 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002135 address_space_stw(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002136 break;
2137 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002138 address_space_stl(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002139 break;
2140 default: abort();
2141 }
2142 return res;
pbrook6658ffb2007-03-16 23:58:11 +00002143}
2144
Avi Kivity1ec9b902012-01-02 12:47:48 +02002145static const MemoryRegionOps watch_mem_ops = {
Peter Maydell66b9b432015-04-26 16:49:24 +01002146 .read_with_attrs = watch_mem_read,
2147 .write_with_attrs = watch_mem_write,
Avi Kivity1ec9b902012-01-02 12:47:48 +02002148 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00002149};
pbrook6658ffb2007-03-16 23:58:11 +00002150
Peter Maydellf25a49e2015-04-26 16:49:24 +01002151static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2152 unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002153{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002154 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002155 uint8_t buf[8];
Peter Maydell5c9eb022015-04-26 16:49:24 +01002156 MemTxResult res;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002157
blueswir1db7b5422007-05-26 17:36:03 +00002158#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002159 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002160 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00002161#endif
Peter Maydell5c9eb022015-04-26 16:49:24 +01002162 res = address_space_read(subpage->as, addr + subpage->base,
2163 attrs, buf, len);
2164 if (res) {
2165 return res;
Peter Maydellf25a49e2015-04-26 16:49:24 +01002166 }
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002167 switch (len) {
2168 case 1:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002169 *data = ldub_p(buf);
2170 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002171 case 2:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002172 *data = lduw_p(buf);
2173 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002174 case 4:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002175 *data = ldl_p(buf);
2176 return MEMTX_OK;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002177 case 8:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002178 *data = ldq_p(buf);
2179 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002180 default:
2181 abort();
2182 }
blueswir1db7b5422007-05-26 17:36:03 +00002183}
2184
Peter Maydellf25a49e2015-04-26 16:49:24 +01002185static MemTxResult subpage_write(void *opaque, hwaddr addr,
2186 uint64_t value, unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002187{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002188 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002189 uint8_t buf[8];
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002190
blueswir1db7b5422007-05-26 17:36:03 +00002191#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002192 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002193 " value %"PRIx64"\n",
2194 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00002195#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002196 switch (len) {
2197 case 1:
2198 stb_p(buf, value);
2199 break;
2200 case 2:
2201 stw_p(buf, value);
2202 break;
2203 case 4:
2204 stl_p(buf, value);
2205 break;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002206 case 8:
2207 stq_p(buf, value);
2208 break;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002209 default:
2210 abort();
2211 }
Peter Maydell5c9eb022015-04-26 16:49:24 +01002212 return address_space_write(subpage->as, addr + subpage->base,
2213 attrs, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00002214}
2215
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002216static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08002217 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002218{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002219 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002220#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002221 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002222 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002223#endif
2224
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002225 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08002226 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002227}
2228
Avi Kivity70c68e42012-01-02 12:32:48 +02002229static const MemoryRegionOps subpage_ops = {
Peter Maydellf25a49e2015-04-26 16:49:24 +01002230 .read_with_attrs = subpage_read,
2231 .write_with_attrs = subpage_write,
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002232 .impl.min_access_size = 1,
2233 .impl.max_access_size = 8,
2234 .valid.min_access_size = 1,
2235 .valid.max_access_size = 8,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002236 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02002237 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00002238};
2239
Anthony Liguoric227f092009-10-01 16:12:16 -05002240static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002241 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00002242{
2243 int idx, eidx;
2244
2245 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2246 return -1;
2247 idx = SUBPAGE_IDX(start);
2248 eidx = SUBPAGE_IDX(end);
2249#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002250 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2251 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00002252#endif
blueswir1db7b5422007-05-26 17:36:03 +00002253 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02002254 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00002255 }
2256
2257 return 0;
2258}
2259
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002260static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00002261{
Anthony Liguoric227f092009-10-01 16:12:16 -05002262 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00002263
Anthony Liguori7267c092011-08-20 22:09:37 -05002264 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00002265
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002266 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00002267 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002268 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002269 NULL, TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02002270 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00002271#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002272 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2273 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00002274#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02002275 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00002276
2277 return mmio;
2278}
2279
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002280static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2281 MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02002282{
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002283 assert(as);
Avi Kivity5312bd82012-02-12 18:32:55 +02002284 MemoryRegionSection section = {
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002285 .address_space = as,
Avi Kivity5312bd82012-02-12 18:32:55 +02002286 .mr = mr,
2287 .offset_within_address_space = 0,
2288 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002289 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02002290 };
2291
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002292 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02002293}
2294
Peter Maydella54c87b2016-01-21 14:15:05 +00002295MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
Avi Kivityaa102232012-03-08 17:06:55 +02002296{
Peter Maydella54c87b2016-01-21 14:15:05 +00002297 int asidx = cpu_asidx_from_attrs(cpu, attrs);
2298 CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
Peter Maydell32857f42015-10-01 15:29:50 +01002299 AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002300 MemoryRegionSection *sections = d->map.sections;
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002301
2302 return sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02002303}
2304
Avi Kivitye9179ce2009-06-14 11:38:52 +03002305static void io_mem_init(void)
2306{
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002307 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002308 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002309 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002310 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002311 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002312 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002313 NULL, UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03002314}
2315
Avi Kivityac1970f2012-10-03 16:22:53 +02002316static void mem_begin(MemoryListener *listener)
2317{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002318 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002319 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2320 uint16_t n;
2321
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002322 n = dummy_section(&d->map, as, &io_mem_unassigned);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002323 assert(n == PHYS_SECTION_UNASSIGNED);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002324 n = dummy_section(&d->map, as, &io_mem_notdirty);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002325 assert(n == PHYS_SECTION_NOTDIRTY);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002326 n = dummy_section(&d->map, as, &io_mem_rom);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002327 assert(n == PHYS_SECTION_ROM);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002328 n = dummy_section(&d->map, as, &io_mem_watch);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002329 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02002330
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02002331 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02002332 d->as = as;
2333 as->next_dispatch = d;
2334}
2335
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002336static void address_space_dispatch_free(AddressSpaceDispatch *d)
2337{
2338 phys_sections_free(&d->map);
2339 g_free(d);
2340}
2341
Paolo Bonzini00752702013-05-29 12:13:54 +02002342static void mem_commit(MemoryListener *listener)
2343{
2344 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02002345 AddressSpaceDispatch *cur = as->dispatch;
2346 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02002347
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002348 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02002349
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002350 atomic_rcu_set(&as->dispatch, next);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002351 if (cur) {
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002352 call_rcu(cur, address_space_dispatch_free, rcu);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002353 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02002354}
2355
Avi Kivity1d711482012-10-02 18:54:45 +02002356static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02002357{
Peter Maydell32857f42015-10-01 15:29:50 +01002358 CPUAddressSpace *cpuas;
2359 AddressSpaceDispatch *d;
Avi Kivity117712c2012-02-12 21:23:17 +02002360
2361    /* Since each CPU stores RAM addresses in its TLB cache, we must
2362       reset the modified entries */
Peter Maydell32857f42015-10-01 15:29:50 +01002363 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
2364 cpu_reloading_memory_map();
2365 /* The CPU and TLB are protected by the iothread lock.
2366 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2367 * may have split the RCU critical section.
2368 */
2369 d = atomic_rcu_read(&cpuas->as->dispatch);
2370 cpuas->memory_dispatch = d;
2371 tlb_flush(cpuas->cpu, 1);
Avi Kivity50c1e142012-02-08 21:36:02 +02002372}
2373
Avi Kivityac1970f2012-10-03 16:22:53 +02002374void address_space_init_dispatch(AddressSpace *as)
2375{
Paolo Bonzini00752702013-05-29 12:13:54 +02002376 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002377 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002378 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02002379 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02002380 .region_add = mem_add,
2381 .region_nop = mem_add,
2382 .priority = 0,
2383 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002384 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02002385}
2386
Paolo Bonzini6e48e8f2015-02-10 10:25:44 -07002387void address_space_unregister(AddressSpace *as)
2388{
2389 memory_listener_unregister(&as->dispatch_listener);
2390}
2391
Avi Kivity83f3c252012-10-07 12:59:55 +02002392void address_space_destroy_dispatch(AddressSpace *as)
2393{
2394 AddressSpaceDispatch *d = as->dispatch;
2395
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002396 atomic_rcu_set(&as->dispatch, NULL);
2397 if (d) {
2398 call_rcu(d, address_space_dispatch_free, rcu);
2399 }
Avi Kivity83f3c252012-10-07 12:59:55 +02002400}
2401
Avi Kivity62152b82011-07-26 14:26:14 +03002402static void memory_map_init(void)
2403{
Anthony Liguori7267c092011-08-20 22:09:37 -05002404 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01002405
Paolo Bonzini57271d62013-11-07 17:14:37 +01002406 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002407 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03002408
Anthony Liguori7267c092011-08-20 22:09:37 -05002409 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02002410 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2411 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002412 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity62152b82011-07-26 14:26:14 +03002413}
2414
2415MemoryRegion *get_system_memory(void)
2416{
2417 return system_memory;
2418}
2419
Avi Kivity309cb472011-08-08 16:09:03 +03002420MemoryRegion *get_system_io(void)
2421{
2422 return system_io;
2423}
2424
pbrooke2eef172008-06-08 01:09:01 +00002425#endif /* !defined(CONFIG_USER_ONLY) */
2426
bellard13eb76e2004-01-24 15:23:36 +00002427/* physical memory access (slow version, mainly for debug) */
2428#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02002429int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00002430 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002431{
2432 int l, flags;
2433 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00002434 void * p;
bellard13eb76e2004-01-24 15:23:36 +00002435
2436 while (len > 0) {
2437 page = addr & TARGET_PAGE_MASK;
2438 l = (page + TARGET_PAGE_SIZE) - addr;
2439 if (l > len)
2440 l = len;
2441 flags = page_get_flags(page);
2442 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00002443 return -1;
bellard13eb76e2004-01-24 15:23:36 +00002444 if (is_write) {
2445 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00002446 return -1;
bellard579a97f2007-11-11 14:26:47 +00002447 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002448 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00002449 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002450 memcpy(p, buf, l);
2451 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00002452 } else {
2453 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00002454 return -1;
bellard579a97f2007-11-11 14:26:47 +00002455 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002456 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00002457 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002458 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00002459 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00002460 }
2461 len -= l;
2462 buf += l;
2463 addr += l;
2464 }
Paul Brooka68fe892010-03-01 00:08:59 +00002465 return 0;
bellard13eb76e2004-01-24 15:23:36 +00002466}
bellard8df1cd02005-01-28 22:37:22 +00002467
bellard13eb76e2004-01-24 15:23:36 +00002468#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002469
Paolo Bonzini845b6212015-03-23 11:45:53 +01002470static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002471 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002472{
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002473 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002474 addr += memory_region_get_ram_addr(mr);
2475
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002476 /* No early return if dirty_log_mask is or becomes 0, because
2477 * cpu_physical_memory_set_dirty_range will still call
2478 * xen_modified_memory.
2479 */
2480 if (dirty_log_mask) {
2481 dirty_log_mask =
2482 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002483 }
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002484 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
2485 tb_invalidate_phys_range(addr, addr + length);
2486 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2487 }
2488 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002489}
2490
Richard Henderson23326162013-07-08 14:55:59 -07002491static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02002492{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02002493 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07002494
2495 /* Regions are assumed to support 1-4 byte accesses unless
2496 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07002497 if (access_size_max == 0) {
2498 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002499 }
Richard Henderson23326162013-07-08 14:55:59 -07002500
2501 /* Bound the maximum access by the alignment of the address. */
2502 if (!mr->ops->impl.unaligned) {
2503 unsigned align_size_max = addr & -addr;
2504 if (align_size_max != 0 && align_size_max < access_size_max) {
2505 access_size_max = align_size_max;
2506 }
2507 }
2508
2509 /* Don't attempt accesses larger than the maximum. */
2510 if (l > access_size_max) {
2511 l = access_size_max;
2512 }
Peter Maydell6554f5c2015-07-24 13:33:10 +01002513 l = pow2floor(l);
Richard Henderson23326162013-07-08 14:55:59 -07002514
2515 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002516}
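/* Editorial worked example (not part of this file): the clamping above
 * combines three limits: the region's declared maximum (defaulting to 4),
 * the natural alignment of the address (addr & -addr isolates the lowest
 * set bit), and a final rounding down to a power of two.  For instance, a
 * 6-byte request at an address whose low bits are ...10, against a region
 * with valid.max_access_size == 4, is clamped to min(4, 2) = 2; pow2floor
 * leaves it at 2, so the caller issues a 2-byte access and loops for the
 * remaining bytes.
 */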
2517
Jan Kiszka4840f102015-06-18 18:47:22 +02002518static bool prepare_mmio_access(MemoryRegion *mr)
Paolo Bonzini125b3802015-06-18 18:47:21 +02002519{
Jan Kiszka4840f102015-06-18 18:47:22 +02002520 bool unlocked = !qemu_mutex_iothread_locked();
2521 bool release_lock = false;
2522
2523 if (unlocked && mr->global_locking) {
2524 qemu_mutex_lock_iothread();
2525 unlocked = false;
2526 release_lock = true;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002527 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002528 if (mr->flush_coalesced_mmio) {
2529 if (unlocked) {
2530 qemu_mutex_lock_iothread();
2531 }
2532 qemu_flush_coalesced_mmio_buffer();
2533 if (unlocked) {
2534 qemu_mutex_unlock_iothread();
2535 }
2536 }
2537
2538 return release_lock;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002539}
2540
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002541/* Called within RCU critical section. */
2542static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
2543 MemTxAttrs attrs,
2544 const uint8_t *buf,
2545 int len, hwaddr addr1,
2546 hwaddr l, MemoryRegion *mr)
bellard13eb76e2004-01-24 15:23:36 +00002547{
bellard13eb76e2004-01-24 15:23:36 +00002548 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002549 uint64_t val;
Peter Maydell3b643492015-04-26 16:49:23 +01002550 MemTxResult result = MEMTX_OK;
Jan Kiszka4840f102015-06-18 18:47:22 +02002551 bool release_lock = false;
ths3b46e622007-09-17 08:09:54 +00002552
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002553 for (;;) {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002554 if (!memory_access_is_direct(mr, true)) {
2555 release_lock |= prepare_mmio_access(mr);
2556 l = memory_access_size(mr, l, addr1);
2557 /* XXX: could force current_cpu to NULL to avoid
2558 potential bugs */
2559 switch (l) {
2560 case 8:
2561 /* 64 bit write access */
2562 val = ldq_p(buf);
2563 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2564 attrs);
2565 break;
2566 case 4:
2567 /* 32 bit write access */
2568 val = ldl_p(buf);
2569 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2570 attrs);
2571 break;
2572 case 2:
2573 /* 16 bit write access */
2574 val = lduw_p(buf);
2575 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2576 attrs);
2577 break;
2578 case 1:
2579 /* 8 bit write access */
2580 val = ldub_p(buf);
2581 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2582 attrs);
2583 break;
2584 default:
2585 abort();
bellard13eb76e2004-01-24 15:23:36 +00002586 }
2587 } else {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002588 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002589 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002590 memcpy(ptr, buf, l);
2591 invalidate_and_set_dirty(mr, addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002592 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002593
2594 if (release_lock) {
2595 qemu_mutex_unlock_iothread();
2596 release_lock = false;
2597 }
2598
bellard13eb76e2004-01-24 15:23:36 +00002599 len -= l;
2600 buf += l;
2601 addr += l;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002602
2603 if (!len) {
2604 break;
2605 }
2606
2607 l = len;
2608 mr = address_space_translate(as, addr, &addr1, &l, true);
bellard13eb76e2004-01-24 15:23:36 +00002609 }
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002610
Peter Maydell3b643492015-04-26 16:49:23 +01002611 return result;
bellard13eb76e2004-01-24 15:23:36 +00002612}
bellard8df1cd02005-01-28 22:37:22 +00002613
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002614MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2615 const uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002616{
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002617 hwaddr l;
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002618 hwaddr addr1;
2619 MemoryRegion *mr;
2620 MemTxResult result = MEMTX_OK;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002621
2622 if (len > 0) {
2623 rcu_read_lock();
2624 l = len;
2625 mr = address_space_translate(as, addr, &addr1, &l, true);
2626 result = address_space_write_continue(as, addr, attrs, buf, len,
2627 addr1, l, mr);
2628 rcu_read_unlock();
2629 }
2630
2631 return result;
2632}
2633
2634/* Called within RCU critical section. */
2635MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
2636 MemTxAttrs attrs, uint8_t *buf,
2637 int len, hwaddr addr1, hwaddr l,
2638 MemoryRegion *mr)
2639{
2640 uint8_t *ptr;
2641 uint64_t val;
2642 MemTxResult result = MEMTX_OK;
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002643 bool release_lock = false;
2644
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002645 for (;;) {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002646 if (!memory_access_is_direct(mr, false)) {
2647 /* I/O case */
2648 release_lock |= prepare_mmio_access(mr);
2649 l = memory_access_size(mr, l, addr1);
2650 switch (l) {
2651 case 8:
2652 /* 64 bit read access */
2653 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2654 attrs);
2655 stq_p(buf, val);
2656 break;
2657 case 4:
2658 /* 32 bit read access */
2659 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2660 attrs);
2661 stl_p(buf, val);
2662 break;
2663 case 2:
2664 /* 16 bit read access */
2665 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2666 attrs);
2667 stw_p(buf, val);
2668 break;
2669 case 1:
2670 /* 8 bit read access */
2671 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2672 attrs);
2673 stb_p(buf, val);
2674 break;
2675 default:
2676 abort();
2677 }
2678 } else {
2679 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002680 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002681 memcpy(buf, ptr, l);
2682 }
2683
2684 if (release_lock) {
2685 qemu_mutex_unlock_iothread();
2686 release_lock = false;
2687 }
2688
2689 len -= l;
2690 buf += l;
2691 addr += l;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002692
2693 if (!len) {
2694 break;
2695 }
2696
2697 l = len;
2698 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002699 }
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002700
2701 return result;
2702}
2703
Paolo Bonzini3cc8f882015-12-09 10:34:13 +01002704MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
2705 MemTxAttrs attrs, uint8_t *buf, int len)
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002706{
2707 hwaddr l;
2708 hwaddr addr1;
2709 MemoryRegion *mr;
2710 MemTxResult result = MEMTX_OK;
2711
2712 if (len > 0) {
2713 rcu_read_lock();
2714 l = len;
2715 mr = address_space_translate(as, addr, &addr1, &l, false);
2716 result = address_space_read_continue(as, addr, attrs, buf, len,
2717 addr1, l, mr);
2718 rcu_read_unlock();
2719 }
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002720
2721 return result;
Avi Kivityac1970f2012-10-03 16:22:53 +02002722}
2723
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002724MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2725 uint8_t *buf, int len, bool is_write)
2726{
2727 if (is_write) {
2728 return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
2729 } else {
2730 return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
2731 }
2732}
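/* Editorial sketch (not part of this file): typical device-emulation use of
 * the API above.  Assumes it runs inside QEMU after cpu_exec_init_all(), so
 * address_space_memory is valid; the guest addresses are illustrative.
 */
static void example_guest_memory_copy(void)
{
    uint8_t buf[16];
    MemTxResult res;

    /* Read 16 bytes of guest physical memory at 0x1000. */
    res = address_space_rw(&address_space_memory, 0x1000,
                           MEMTXATTRS_UNSPECIFIED, buf, sizeof(buf), false);
    if (res != MEMTX_OK) {
        return;    /* the access faulted or hit an unassigned region */
    }

    /* Write the bytes back 4 KiB higher. */
    address_space_write(&address_space_memory, 0x2000,
                        MEMTXATTRS_UNSPECIFIED, buf, sizeof(buf));
}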
Avi Kivityac1970f2012-10-03 16:22:53 +02002733
Avi Kivitya8170e52012-10-23 12:30:10 +02002734void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002735 int len, int is_write)
2736{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002737 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2738 buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002739}
2740
Alexander Graf582b55a2013-12-11 14:17:44 +01002741enum write_rom_type {
2742 WRITE_DATA,
2743 FLUSH_CACHE,
2744};
2745
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002746static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
Alexander Graf582b55a2013-12-11 14:17:44 +01002747 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
bellardd0ecd2a2006-04-23 17:14:48 +00002748{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002749 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002750 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002751 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002752 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002753
Paolo Bonzini41063e12015-03-18 14:21:43 +01002754 rcu_read_lock();
bellardd0ecd2a2006-04-23 17:14:48 +00002755 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002756 l = len;
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002757 mr = address_space_translate(as, addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002758
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002759 if (!(memory_region_is_ram(mr) ||
2760 memory_region_is_romd(mr))) {
Paolo Bonzinib242e0e2015-07-04 00:24:51 +02002761 l = memory_access_size(mr, l, addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002762 } else {
bellardd0ecd2a2006-04-23 17:14:48 +00002763 /* ROM/RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002764 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf582b55a2013-12-11 14:17:44 +01002765 switch (type) {
2766 case WRITE_DATA:
2767 memcpy(ptr, buf, l);
Paolo Bonzini845b6212015-03-23 11:45:53 +01002768 invalidate_and_set_dirty(mr, addr1, l);
Alexander Graf582b55a2013-12-11 14:17:44 +01002769 break;
2770 case FLUSH_CACHE:
2771 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2772 break;
2773 }
bellardd0ecd2a2006-04-23 17:14:48 +00002774 }
2775 len -= l;
2776 buf += l;
2777 addr += l;
2778 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002779 rcu_read_unlock();
bellardd0ecd2a2006-04-23 17:14:48 +00002780}
2781
Alexander Graf582b55a2013-12-11 14:17:44 +01002782/* Used for ROM loading: can write in RAM and ROM. */

Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002783void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
Alexander Graf582b55a2013-12-11 14:17:44 +01002784 const uint8_t *buf, int len)
2785{
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002786 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
Alexander Graf582b55a2013-12-11 14:17:44 +01002787}
2788
2789void cpu_flush_icache_range(hwaddr start, int len)
2790{
2791 /*
2792 * This function should do the same thing as an icache flush that was
2793 * triggered from within the guest. For TCG we are always cache coherent,
2794 * so there is no need to flush anything. For KVM / Xen we need to flush
2795 * the host's instruction cache at least.
2796 */
2797 if (tcg_enabled()) {
2798 return;
2799 }
2800
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002801 cpu_physical_memory_write_rom_internal(&address_space_memory,
2802 start, NULL, len, FLUSH_CACHE);
Alexander Graf582b55a2013-12-11 14:17:44 +01002803}
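/* Editorial sketch (not part of this file): a firmware loader would combine
 * the two helpers above: write the image through the ROM path, then make the
 * host instruction cache coherent for the KVM/Xen case.  The base address
 * and parameters are illustrative.
 */
static void example_load_firmware(const uint8_t *image, int size)
{
    hwaddr rom_base = 0xfffc0000;    /* hypothetical ROM base */

    cpu_physical_memory_write_rom(&address_space_memory, rom_base, image, size);
    cpu_flush_icache_range(rom_base, size);
}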
2804
aliguori6d16c2f2009-01-22 16:59:11 +00002805typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002806 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002807 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002808 hwaddr addr;
2809 hwaddr len;
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002810 bool in_use;
aliguori6d16c2f2009-01-22 16:59:11 +00002811} BounceBuffer;
2812
2813static BounceBuffer bounce;
2814
aliguoriba223c22009-01-22 16:59:16 +00002815typedef struct MapClient {
Fam Zhenge95205e2015-03-16 17:03:37 +08002816 QEMUBH *bh;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002817 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002818} MapClient;
2819
Fam Zheng38e047b2015-03-16 17:03:35 +08002820QemuMutex map_client_list_lock;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002821static QLIST_HEAD(map_client_list, MapClient) map_client_list
2822 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002823
Fam Zhenge95205e2015-03-16 17:03:37 +08002824static void cpu_unregister_map_client_do(MapClient *client)
aliguoriba223c22009-01-22 16:59:16 +00002825{
Blue Swirl72cf2d42009-09-12 07:36:22 +00002826 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002827 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002828}
2829
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002830static void cpu_notify_map_clients_locked(void)
aliguoriba223c22009-01-22 16:59:16 +00002831{
2832 MapClient *client;
2833
Blue Swirl72cf2d42009-09-12 07:36:22 +00002834 while (!QLIST_EMPTY(&map_client_list)) {
2835 client = QLIST_FIRST(&map_client_list);
Fam Zhenge95205e2015-03-16 17:03:37 +08002836 qemu_bh_schedule(client->bh);
2837 cpu_unregister_map_client_do(client);
aliguoriba223c22009-01-22 16:59:16 +00002838 }
2839}
2840
Fam Zhenge95205e2015-03-16 17:03:37 +08002841void cpu_register_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002842{
2843 MapClient *client = g_malloc(sizeof(*client));
2844
Fam Zheng38e047b2015-03-16 17:03:35 +08002845 qemu_mutex_lock(&map_client_list_lock);
Fam Zhenge95205e2015-03-16 17:03:37 +08002846 client->bh = bh;
bellardd0ecd2a2006-04-23 17:14:48 +00002847 QLIST_INSERT_HEAD(&map_client_list, client, link);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002848 if (!atomic_read(&bounce.in_use)) {
2849 cpu_notify_map_clients_locked();
2850 }
Fam Zheng38e047b2015-03-16 17:03:35 +08002851 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002852}
2853
Fam Zheng38e047b2015-03-16 17:03:35 +08002854void cpu_exec_init_all(void)
2855{
2856 qemu_mutex_init(&ram_list.mutex);
Fam Zheng38e047b2015-03-16 17:03:35 +08002857 io_mem_init();
Paolo Bonzini680a4782015-11-02 09:23:52 +01002858 memory_map_init();
Fam Zheng38e047b2015-03-16 17:03:35 +08002859 qemu_mutex_init(&map_client_list_lock);
2860}
2861
Fam Zhenge95205e2015-03-16 17:03:37 +08002862void cpu_unregister_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002863{
Fam Zhenge95205e2015-03-16 17:03:37 +08002864 MapClient *client;
bellardd0ecd2a2006-04-23 17:14:48 +00002865
Fam Zhenge95205e2015-03-16 17:03:37 +08002866 qemu_mutex_lock(&map_client_list_lock);
2867 QLIST_FOREACH(client, &map_client_list, link) {
2868 if (client->bh == bh) {
2869 cpu_unregister_map_client_do(client);
2870 break;
2871 }
2872 }
2873 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002874}
2875
2876static void cpu_notify_map_clients(void)
2877{
Fam Zheng38e047b2015-03-16 17:03:35 +08002878 qemu_mutex_lock(&map_client_list_lock);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002879 cpu_notify_map_clients_locked();
Fam Zheng38e047b2015-03-16 17:03:35 +08002880 qemu_mutex_unlock(&map_client_list_lock);
aliguori6d16c2f2009-01-22 16:59:11 +00002881}
2882
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002883bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2884{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002885 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002886 hwaddr l, xlat;
2887
Paolo Bonzini41063e12015-03-18 14:21:43 +01002888 rcu_read_lock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002889 while (len > 0) {
2890 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002891 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2892 if (!memory_access_is_direct(mr, is_write)) {
2893 l = memory_access_size(mr, l, addr);
2894 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002895 return false;
2896 }
2897 }
2898
2899 len -= l;
2900 addr += l;
2901 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002902 rcu_read_unlock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002903 return true;
2904}
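/* Editorial sketch (not part of this file): a DMA engine model can probe a
 * transfer window with the helper above before committing to it, so it can
 * refuse the whole transfer instead of faulting partway through.  The helper
 * name is illustrative.
 */
static MemTxResult example_checked_write(AddressSpace *as, hwaddr base,
                                         const uint8_t *buf, int len)
{
    if (!address_space_access_valid(as, base, len, true)) {
        return MEMTX_DECODE_ERROR;   /* refuse up front */
    }
    return address_space_write(as, base, MEMTXATTRS_UNSPECIFIED, buf, len);
}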
2905
aliguori6d16c2f2009-01-22 16:59:11 +00002906/* Map a physical memory region into a host virtual address.
2907 * May map a subset of the requested range, given by and returned in *plen.
2908 * May return NULL if resources needed to perform the mapping are exhausted.
2909 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002910 * Use cpu_register_map_client() to know when retrying the map operation is
2911 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002912 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002913void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002914 hwaddr addr,
2915 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002916 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002917{
Avi Kivitya8170e52012-10-23 12:30:10 +02002918 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002919 hwaddr done = 0;
2920 hwaddr l, xlat, base;
2921 MemoryRegion *mr, *this_mr;
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01002922 void *ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00002923
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002924 if (len == 0) {
2925 return NULL;
2926 }
aliguori6d16c2f2009-01-22 16:59:11 +00002927
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002928 l = len;
Paolo Bonzini41063e12015-03-18 14:21:43 +01002929 rcu_read_lock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002930 mr = address_space_translate(as, addr, &xlat, &l, is_write);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002931
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002932 if (!memory_access_is_direct(mr, is_write)) {
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002933 if (atomic_xchg(&bounce.in_use, true)) {
Paolo Bonzini41063e12015-03-18 14:21:43 +01002934 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002935 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002936 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002937 /* Avoid unbounded allocations */
2938 l = MIN(l, TARGET_PAGE_SIZE);
2939 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002940 bounce.addr = addr;
2941 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002942
2943 memory_region_ref(mr);
2944 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002945 if (!is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002946 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2947 bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002948 }
aliguori6d16c2f2009-01-22 16:59:11 +00002949
Paolo Bonzini41063e12015-03-18 14:21:43 +01002950 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002951 *plen = l;
2952 return bounce.buffer;
2953 }
2954
2955 base = xlat;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002956
2957 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00002958 len -= l;
2959 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002960 done += l;
2961 if (len == 0) {
2962 break;
2963 }
2964
2965 l = len;
2966 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2967 if (this_mr != mr || xlat != base + done) {
2968 break;
2969 }
aliguori6d16c2f2009-01-22 16:59:11 +00002970 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002971
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002972 memory_region_ref(mr);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002973 *plen = done;
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002974 ptr = qemu_ram_ptr_length(mr->ram_block, base, plen);
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01002975 rcu_read_unlock();
2976
2977 return ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00002978}
2979
Avi Kivityac1970f2012-10-03 16:22:53 +02002980/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002981 * Will also mark the memory as dirty if is_write == 1. access_len gives
2982 * the amount of memory that was actually read or written by the caller.
2983 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002984void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2985 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002986{
2987 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002988 MemoryRegion *mr;
2989 ram_addr_t addr1;
2990
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01002991 mr = memory_region_from_host(buffer, &addr1);
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002992 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00002993 if (is_write) {
Paolo Bonzini845b6212015-03-23 11:45:53 +01002994 invalidate_and_set_dirty(mr, addr1, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002995 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002996 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002997 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002998 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002999 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00003000 return;
3001 }
3002 if (is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01003003 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
3004 bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00003005 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00003006 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003007 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02003008 memory_region_unref(bounce.mr);
Fam Zhengc2cba0f2015-03-16 17:03:33 +08003009 atomic_mb_set(&bounce.in_use, false);
aliguoriba223c22009-01-22 16:59:16 +00003010 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00003011}
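/* Editorial sketch (not part of this file): the usual map/unmap cycle for
 * zero-copy access, following the comment above address_space_map().  If the
 * mapping has to fall back to the single bounce buffer and that buffer is
 * busy, NULL is returned; a real device would then register a bottom half
 * with cpu_register_map_client() and retry from there.  Names are
 * illustrative.
 */
static void example_dma_fill(AddressSpace *as, hwaddr addr, hwaddr size)
{
    hwaddr plen = size;
    void *host = address_space_map(as, addr, &plen, true /* is_write */);

    if (!host) {
        /* Bounce buffer in use: arrange to retry via cpu_register_map_client(). */
        return;
    }
    /* Only the first plen bytes are guaranteed contiguous; loop for the rest. */
    memset(host, 0, plen);
    address_space_unmap(as, host, plen, true, plen);
}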
bellardd0ecd2a2006-04-23 17:14:48 +00003012
Avi Kivitya8170e52012-10-23 12:30:10 +02003013void *cpu_physical_memory_map(hwaddr addr,
3014 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02003015 int is_write)
3016{
3017 return address_space_map(&address_space_memory, addr, plen, is_write);
3018}
3019
Avi Kivitya8170e52012-10-23 12:30:10 +02003020void cpu_physical_memory_unmap(void *buffer, hwaddr len,
3021 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02003022{
3023 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
3024}
3025
bellard8df1cd02005-01-28 22:37:22 +00003026/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003027static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
3028 MemTxAttrs attrs,
3029 MemTxResult *result,
3030 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003031{
bellard8df1cd02005-01-28 22:37:22 +00003032 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02003033 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003034 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003035 hwaddr l = 4;
3036 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003037 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003038 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003039
Paolo Bonzini41063e12015-03-18 14:21:43 +01003040 rcu_read_lock();
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003041 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003042 if (l < 4 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003043 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003044
bellard8df1cd02005-01-28 22:37:22 +00003045 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003046 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003047#if defined(TARGET_WORDS_BIGENDIAN)
3048 if (endian == DEVICE_LITTLE_ENDIAN) {
3049 val = bswap32(val);
3050 }
3051#else
3052 if (endian == DEVICE_BIG_ENDIAN) {
3053 val = bswap32(val);
3054 }
3055#endif
bellard8df1cd02005-01-28 22:37:22 +00003056 } else {
3057 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003058 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003059 switch (endian) {
3060 case DEVICE_LITTLE_ENDIAN:
3061 val = ldl_le_p(ptr);
3062 break;
3063 case DEVICE_BIG_ENDIAN:
3064 val = ldl_be_p(ptr);
3065 break;
3066 default:
3067 val = ldl_p(ptr);
3068 break;
3069 }
Peter Maydell50013112015-04-26 16:49:24 +01003070 r = MEMTX_OK;
3071 }
3072 if (result) {
3073 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003074 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003075 if (release_lock) {
3076 qemu_mutex_unlock_iothread();
3077 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003078 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003079 return val;
3080}
3081
Peter Maydell50013112015-04-26 16:49:24 +01003082uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
3083 MemTxAttrs attrs, MemTxResult *result)
3084{
3085 return address_space_ldl_internal(as, addr, attrs, result,
3086 DEVICE_NATIVE_ENDIAN);
3087}
3088
3089uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
3090 MemTxAttrs attrs, MemTxResult *result)
3091{
3092 return address_space_ldl_internal(as, addr, attrs, result,
3093 DEVICE_LITTLE_ENDIAN);
3094}
3095
3096uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
3097 MemTxAttrs attrs, MemTxResult *result)
3098{
3099 return address_space_ldl_internal(as, addr, attrs, result,
3100 DEVICE_BIG_ENDIAN);
3101}
3102
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003103uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003104{
Peter Maydell50013112015-04-26 16:49:24 +01003105 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003106}
3107
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003108uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003109{
Peter Maydell50013112015-04-26 16:49:24 +01003110 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003111}
3112
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003113uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003114{
Peter Maydell50013112015-04-26 16:49:24 +01003115 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003116}
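/* Editorial sketch (not part of this file): reading a 32-bit little-endian
 * descriptor field from guest memory with an explicit transaction result,
 * rather than the result-less ldl_*_phys() convenience wrappers above.  The
 * function name and error value are illustrative.
 */
static uint32_t example_read_desc_word(AddressSpace *as, hwaddr desc_addr)
{
    MemTxResult res;
    uint32_t val = address_space_ldl_le(as, desc_addr, MEMTXATTRS_UNSPECIFIED,
                                        &res);

    return (res == MEMTX_OK) ? val : 0xffffffff;   /* all-ones on bus error */
}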
3117
bellard84b7b8e2005-11-28 21:19:04 +00003118/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003119static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
3120 MemTxAttrs attrs,
3121 MemTxResult *result,
3122 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00003123{
bellard84b7b8e2005-11-28 21:19:04 +00003124 uint8_t *ptr;
3125 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003126 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003127 hwaddr l = 8;
3128 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003129 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003130 bool release_lock = false;
bellard84b7b8e2005-11-28 21:19:04 +00003131
Paolo Bonzini41063e12015-03-18 14:21:43 +01003132 rcu_read_lock();
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003133 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003134 false);
3135 if (l < 8 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003136 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003137
bellard84b7b8e2005-11-28 21:19:04 +00003138 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003139 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
Paolo Bonzini968a5622013-05-24 17:58:37 +02003140#if defined(TARGET_WORDS_BIGENDIAN)
3141 if (endian == DEVICE_LITTLE_ENDIAN) {
3142 val = bswap64(val);
3143 }
3144#else
3145 if (endian == DEVICE_BIG_ENDIAN) {
3146 val = bswap64(val);
3147 }
3148#endif
bellard84b7b8e2005-11-28 21:19:04 +00003149 } else {
3150 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003151 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003152 switch (endian) {
3153 case DEVICE_LITTLE_ENDIAN:
3154 val = ldq_le_p(ptr);
3155 break;
3156 case DEVICE_BIG_ENDIAN:
3157 val = ldq_be_p(ptr);
3158 break;
3159 default:
3160 val = ldq_p(ptr);
3161 break;
3162 }
Peter Maydell50013112015-04-26 16:49:24 +01003163 r = MEMTX_OK;
3164 }
3165 if (result) {
3166 *result = r;
bellard84b7b8e2005-11-28 21:19:04 +00003167 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003168 if (release_lock) {
3169 qemu_mutex_unlock_iothread();
3170 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003171 rcu_read_unlock();
bellard84b7b8e2005-11-28 21:19:04 +00003172 return val;
3173}
3174
Peter Maydell50013112015-04-26 16:49:24 +01003175uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
3176 MemTxAttrs attrs, MemTxResult *result)
3177{
3178 return address_space_ldq_internal(as, addr, attrs, result,
3179 DEVICE_NATIVE_ENDIAN);
3180}
3181
3182uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
3183 MemTxAttrs attrs, MemTxResult *result)
3184{
3185 return address_space_ldq_internal(as, addr, attrs, result,
3186 DEVICE_LITTLE_ENDIAN);
3187}
3188
3189uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
3190 MemTxAttrs attrs, MemTxResult *result)
3191{
3192 return address_space_ldq_internal(as, addr, attrs, result,
3193 DEVICE_BIG_ENDIAN);
3194}
3195
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003196uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003197{
Peter Maydell50013112015-04-26 16:49:24 +01003198 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003199}
3200
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003201uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003202{
Peter Maydell50013112015-04-26 16:49:24 +01003203 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003204}
3205
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003206uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003207{
Peter Maydell50013112015-04-26 16:49:24 +01003208 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003209}
3210
bellardaab33092005-10-30 20:48:42 +00003211/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003212uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
3213 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003214{
3215 uint8_t val;
Peter Maydell50013112015-04-26 16:49:24 +01003216 MemTxResult r;
3217
3218 r = address_space_rw(as, addr, attrs, &val, 1, 0);
3219 if (result) {
3220 *result = r;
3221 }
bellardaab33092005-10-30 20:48:42 +00003222 return val;
3223}
3224
Peter Maydell50013112015-04-26 16:49:24 +01003225uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
3226{
3227 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3228}
3229
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003230/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003231static inline uint32_t address_space_lduw_internal(AddressSpace *as,
3232 hwaddr addr,
3233 MemTxAttrs attrs,
3234 MemTxResult *result,
3235 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003236{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003237 uint8_t *ptr;
3238 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003239 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003240 hwaddr l = 2;
3241 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003242 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003243 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003244
Paolo Bonzini41063e12015-03-18 14:21:43 +01003245 rcu_read_lock();
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003246 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003247 false);
3248 if (l < 2 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003249 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003250
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003251 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003252 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003253#if defined(TARGET_WORDS_BIGENDIAN)
3254 if (endian == DEVICE_LITTLE_ENDIAN) {
3255 val = bswap16(val);
3256 }
3257#else
3258 if (endian == DEVICE_BIG_ENDIAN) {
3259 val = bswap16(val);
3260 }
3261#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003262 } else {
3263 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003264 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003265 switch (endian) {
3266 case DEVICE_LITTLE_ENDIAN:
3267 val = lduw_le_p(ptr);
3268 break;
3269 case DEVICE_BIG_ENDIAN:
3270 val = lduw_be_p(ptr);
3271 break;
3272 default:
3273 val = lduw_p(ptr);
3274 break;
3275 }
Peter Maydell50013112015-04-26 16:49:24 +01003276 r = MEMTX_OK;
3277 }
3278 if (result) {
3279 *result = r;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003280 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003281 if (release_lock) {
3282 qemu_mutex_unlock_iothread();
3283 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003284 rcu_read_unlock();
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003285 return val;
bellardaab33092005-10-30 20:48:42 +00003286}
3287
Peter Maydell50013112015-04-26 16:49:24 +01003288uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
3289 MemTxAttrs attrs, MemTxResult *result)
3290{
3291 return address_space_lduw_internal(as, addr, attrs, result,
3292 DEVICE_NATIVE_ENDIAN);
3293}
3294
3295uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
3296 MemTxAttrs attrs, MemTxResult *result)
3297{
3298 return address_space_lduw_internal(as, addr, attrs, result,
3299 DEVICE_LITTLE_ENDIAN);
3300}
3301
3302uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
3303 MemTxAttrs attrs, MemTxResult *result)
3304{
3305 return address_space_lduw_internal(as, addr, attrs, result,
3306 DEVICE_BIG_ENDIAN);
3307}
3308
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003309uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003310{
Peter Maydell50013112015-04-26 16:49:24 +01003311 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003312}
3313
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003314uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003315{
Peter Maydell50013112015-04-26 16:49:24 +01003316 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003317}
3318
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003319uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003320{
Peter Maydell50013112015-04-26 16:49:24 +01003321 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003322}
3323
bellard8df1cd02005-01-28 22:37:22 +00003324/* warning: addr must be aligned. The RAM page is not marked as dirty
3325 and the code inside is not invalidated. It is useful if the dirty
3326 bits are used to track modified PTEs */
Peter Maydell50013112015-04-26 16:49:24 +01003327void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3328 MemTxAttrs attrs, MemTxResult *result)
bellard8df1cd02005-01-28 22:37:22 +00003329{
bellard8df1cd02005-01-28 22:37:22 +00003330 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003331 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003332 hwaddr l = 4;
3333 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003334 MemTxResult r;
Paolo Bonzini845b6212015-03-23 11:45:53 +01003335 uint8_t dirty_log_mask;
Jan Kiszka4840f102015-06-18 18:47:22 +02003336 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003337
Paolo Bonzini41063e12015-03-18 14:21:43 +01003338 rcu_read_lock();
Edgar E. Iglesias2198a122013-11-28 10:13:41 +01003339 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003340 true);
3341 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003342 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003343
Peter Maydell50013112015-04-26 16:49:24 +01003344 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003345 } else {
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003346 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
bellard8df1cd02005-01-28 22:37:22 +00003347 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003348
Paolo Bonzini845b6212015-03-23 11:45:53 +01003349 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3350 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003351 cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
3352 4, dirty_log_mask);
Peter Maydell50013112015-04-26 16:49:24 +01003353 r = MEMTX_OK;
3354 }
3355 if (result) {
3356 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003357 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003358 if (release_lock) {
3359 qemu_mutex_unlock_iothread();
3360 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003361 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003362}
3363
Peter Maydell50013112015-04-26 16:49:24 +01003364void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3365{
3366 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3367}
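/* Editorial sketch (not part of this file): the _notdirty store exists for
 * guest page-table walkers that set accessed/dirty bits in a PTE.  A target's
 * MMU helper might do, in spirit (types, field and flag names illustrative):
 *
 *   pte = ldl_phys(cs->as, pte_addr);
 *   if (!(pte & PTE_ACCESSED)) {
 *       stl_phys_notdirty(cs->as, pte_addr, pte | PTE_ACCESSED);
 *   }
 *
 * Using the normal stl_phys() here would mark the page dirty, which defeats
 * schemes that rely on the dirty bits themselves to detect modified PTEs.
 */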
3368
bellard8df1cd02005-01-28 22:37:22 +00003369/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003370static inline void address_space_stl_internal(AddressSpace *as,
3371 hwaddr addr, uint32_t val,
3372 MemTxAttrs attrs,
3373 MemTxResult *result,
3374 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003375{
bellard8df1cd02005-01-28 22:37:22 +00003376 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003377 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003378 hwaddr l = 4;
3379 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003380 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003381 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003382
Paolo Bonzini41063e12015-03-18 14:21:43 +01003383 rcu_read_lock();
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003384 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003385 true);
3386 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003387 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003388
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003389#if defined(TARGET_WORDS_BIGENDIAN)
3390 if (endian == DEVICE_LITTLE_ENDIAN) {
3391 val = bswap32(val);
3392 }
3393#else
3394 if (endian == DEVICE_BIG_ENDIAN) {
3395 val = bswap32(val);
3396 }
3397#endif
Peter Maydell50013112015-04-26 16:49:24 +01003398 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003399 } else {
bellard8df1cd02005-01-28 22:37:22 +00003400 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003401 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003402 switch (endian) {
3403 case DEVICE_LITTLE_ENDIAN:
3404 stl_le_p(ptr, val);
3405 break;
3406 case DEVICE_BIG_ENDIAN:
3407 stl_be_p(ptr, val);
3408 break;
3409 default:
3410 stl_p(ptr, val);
3411 break;
3412 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003413 invalidate_and_set_dirty(mr, addr1, 4);
Peter Maydell50013112015-04-26 16:49:24 +01003414 r = MEMTX_OK;
bellard8df1cd02005-01-28 22:37:22 +00003415 }
Peter Maydell50013112015-04-26 16:49:24 +01003416 if (result) {
3417 *result = r;
3418 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003419 if (release_lock) {
3420 qemu_mutex_unlock_iothread();
3421 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003422 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003423}
3424
3425void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3426 MemTxAttrs attrs, MemTxResult *result)
3427{
3428 address_space_stl_internal(as, addr, val, attrs, result,
3429 DEVICE_NATIVE_ENDIAN);
3430}
3431
3432void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3433 MemTxAttrs attrs, MemTxResult *result)
3434{
3435 address_space_stl_internal(as, addr, val, attrs, result,
3436 DEVICE_LITTLE_ENDIAN);
3437}
3438
3439void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3440 MemTxAttrs attrs, MemTxResult *result)
3441{
3442 address_space_stl_internal(as, addr, val, attrs, result,
3443 DEVICE_BIG_ENDIAN);
bellard8df1cd02005-01-28 22:37:22 +00003444}
3445
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003446void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003447{
Peter Maydell50013112015-04-26 16:49:24 +01003448 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003449}
3450
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003451void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003452{
Peter Maydell50013112015-04-26 16:49:24 +01003453 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003454}
3455
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003456void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003457{
Peter Maydell50013112015-04-26 16:49:24 +01003458 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003459}
3460
bellardaab33092005-10-30 20:48:42 +00003461/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003462void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3463 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003464{
3465 uint8_t v = val;
Peter Maydell50013112015-04-26 16:49:24 +01003466 MemTxResult r;
3467
3468 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3469 if (result) {
3470 *result = r;
3471 }
3472}
3473
3474void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3475{
3476 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003477}
3478
/* warning: addr must be aligned */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

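/*
 * 64-bit stores: the value is byte-swapped in place to target, little- or
 * big-endian order and then passed by address to the generic
 * address_space_rw() path.
 */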
/* XXX: optimize */
void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = tswap64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_le64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_be64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

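/*
 * Debugger access path (used e.g. by the gdbstub): walks the guest virtual
 * address range page by page, translating each page with
 * cpu_get_phys_page_attrs_debug() and then accessing the per-CPU address
 * space.  Writes go through the ROM-capable path so that ROM contents can be
 * patched as well.
 */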
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        int asidx;
        MemTxAttrs attrs;

        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
        asidx = cpu_asidx_from_attrs(cpu, attrs);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
                                          phys_addr, buf, l);
        } else {
            address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
                             MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

/*
 * Allows code that needs to deal with migration bitmaps etc to still be built
 * target independent.
 */
size_t qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}

#endif

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#ifndef CONFIG_USER_ONLY
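/*
 * Returns true if the given physical address is backed by MMIO rather than
 * by RAM or a ROM device (romd) region.
 */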
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}

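/*
 * Iterates over all RAMBlocks under the RCU read lock, invoking func with the
 * block's id string, host pointer, offset, used length and the caller's
 * opaque pointer.  Iteration stops at the first nonzero return value, which
 * is then propagated back to the caller.  A sketch of a callback that merely
 * counts blocks (hypothetical names, for illustration only):
 *
 *     static int count_block(const char *idstr, void *host, ram_addr_t offset,
 *                            ram_addr_t length, void *opaque)
 *     {
 *         (*(int *)opaque)++;
 *         return 0;
 *     }
 */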
int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);
        if (ret) {
            break;
        }
    }
    rcu_read_unlock();
    return ret;
}
#endif