/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#ifndef _WIN32
#endif

#include "qemu/cutils.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "hw/qdev-core.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#include "hw/xen/xen.h"
#endif
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "hw/hw.h"
#include "exec/memory.h"
#include "exec/ioport.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "exec/log.h"

#include "migration/vmstate.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

#ifdef TARGET_PAGE_BITS_VARY
int target_page_bits;
bool target_page_bits_decided;
#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

bool set_preferred_target_page_bits(int bits)
{
    /* The target page size is the lowest common denominator for all
     * the CPUs in the system, so we can only make it smaller, never
     * larger. And we can't make it smaller once we've committed to
     * a particular size.
     */
#ifdef TARGET_PAGE_BITS_VARY
    assert(bits >= TARGET_PAGE_BITS_MIN);
    if (target_page_bits == 0 || target_page_bits > bits) {
        if (target_page_bits_decided) {
            return false;
        }
        target_page_bits = bits;
    }
#endif
    return true;
}

#if !defined(CONFIG_USER_ONLY)

static void finalize_target_page_bits(void)
{
#ifdef TARGET_PAGE_BITS_VARY
    if (target_page_bits == 0) {
        target_page_bits = TARGET_PAGE_BITS_MIN;
    }
    target_page_bits_decided = true;
#endif
}

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits to skip to the next level (in units of L2_SIZE).
     * 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

#define ADDR_SPACE_BITS 64

/* Size of the L2 (and L3, etc) page tables.  */
#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif

#if !defined(CONFIG_USER_ONLY)

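/* Grow the node array to hold at least @nodes more nodes.  alloc_hint
 * remembers the final size across calls so that later rebuilds of the
 * dispatch tree start from a sensible allocation.
 */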
static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    static unsigned alloc_hint = 16;
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, alloc_hint);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
        alloc_hint = map->nodes_nb_alloc;
    }
}

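/* Allocate one tree node out of the reserved pool.  Every entry starts
 * out as PHYS_SECTION_UNASSIGNED (for a leaf node) or with no child
 * attached (for an interior node).
 */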
static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

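/* Recursively populate the radix tree so that the pages in
 * [*index, *index + *nb) point at @leaf.  Subtrees that are fully
 * covered and aligned at this level are set in place; partial ones
 * recurse one level down.
 */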
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non-leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes);
    }
}

static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return int128_gethi(section->size) ||
           range_covers_byte(section->offset_within_address_space,
                             int128_getlo(section->size), addr);
}

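/* Walk the radix tree from the root down to the section covering @addr.
 * A non-zero skip count jumps over levels that phys_page_compact()
 * folded away.
 */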
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
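/* Look up the section covering @addr, trying the most-recently-used
 * section before falling back to a full tree walk, and refreshing the
 * MRU cache on a miss.  Subpage containers are only resolved to their
 * backing section when @resolve_subpage is set.
 */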
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = atomic_read(&d->mru_section);
    subpage_t *subpage;
    bool update;

    if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
        section_covers_addr(section, addr)) {
        update = false;
    } else {
        section = phys_page_find(d->phys_map, addr, d->map.nodes,
                                 d->map.sections);
        update = true;
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    if (update) {
        atomic_set(&d->mru_section, section);
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions. When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly. For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

/* Called from RCU critical section */
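/* Translate @addr in @as into a terminal MemoryRegion plus offset,
 * following any chain of IOMMUs: each pass translates through one IOMMU
 * and continues in its target address space, and a permission failure
 * ends the walk at the unassigned region.  *plen is clamped to the
 * contiguously translated length.
 */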
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = atomic_rcu_read(&cpu->cpu_ases[asidx].memory_dispatch);

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif

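/* Return the CPU with the given index, or NULL if there is none. */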
CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
#endif

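/* Undo cpu_exec_realizefn(): take the CPU off the global list and
 * unregister whatever vmstate it registered.
 */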
void cpu_exec_unrealizefn(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cpu_list_remove(cpu);

    if (cc->vmsd != NULL) {
        vmstate_unregister(NULL, cc->vmsd, cpu);
    }
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
    }
}

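/* Instance-initialization-time setup of a CPU object: clear the
 * address-space state and, for softmmu, create the "memory" link
 * property, defaulting to the system address space.
 */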
void cpu_exec_initfn(CPUState *cpu)
{
    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();

    /* This is a softmmu CPU object, so create a property for it
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
                             (Object **)&cpu->memory,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));
#endif
}

void cpu_exec_realizefn(CPUState *cpu, Error **errp)
{
    CPUClass *cc ATTRIBUTE_UNUSED = CPU_GET_CLASS(cpu);

    cpu_list_add(cpu);

#ifndef CONFIG_USER_ONLY
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
    }
#endif
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    mmap_lock();
    tb_lock();
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
    tb_unlock();
    mmap_unlock();
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    MemTxAttrs attrs;
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    if (phys != -1) {
        /* Locks grabbed by tb_invalidate_phys_addr */
        tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     * mru_block = xxx
     * rcu_read_unlock()
     * xxx removed from list
     * rcu_read_lock()
     * read mru_block
     * mru_block = NULL;
     * call_rcu(reclaim_ramblock, xxx);
     * rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

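/* Flush the corresponding entries from every CPU's TLB after a range of
 * RAM has been cleaned; the range must lie within a single RAMBlock, as
 * asserted below.
 */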
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block.  */
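/* Atomically test and clear the dirty bits for [start, start + length)
 * in @client's bitmap, one DirtyMemoryBlock at a time.  Returns true if
 * any page in the range was dirty; for TCG the stale TLB entries are
 * flushed as well.
 */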
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    bool dirty = false;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
                                              offset, num);
        page += num;
    }

    rcu_read_unlock();

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
    qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

Avi Kivityac1970f2012-10-03 16:22:53 +02001139static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001140{
1141 subpage_t *subpage;
Avi Kivitya8170e52012-10-23 12:30:10 +02001142 hwaddr base = section->offset_within_address_space
Avi Kivity0f0cb162012-02-13 17:14:32 +02001143 & TARGET_PAGE_MASK;
Michael S. Tsirkin97115a82013-11-13 20:08:19 +02001144 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001145 d->map.nodes, d->map.sections);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001146 MemoryRegionSection subsection = {
1147 .offset_within_address_space = base,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001148 .size = int128_make64(TARGET_PAGE_SIZE),
Avi Kivity0f0cb162012-02-13 17:14:32 +02001149 };
Avi Kivitya8170e52012-10-23 12:30:10 +02001150 hwaddr start, end;
Avi Kivity0f0cb162012-02-13 17:14:32 +02001151
Avi Kivityf3705d52012-03-08 16:16:34 +02001152 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001153
Avi Kivityf3705d52012-03-08 16:16:34 +02001154 if (!(existing->mr->subpage)) {
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001155 subpage = subpage_init(d->as, base);
Edgar E. Iglesias3be91e82013-11-07 18:42:51 +01001156 subsection.address_space = d->as;
Avi Kivity0f0cb162012-02-13 17:14:32 +02001157 subsection.mr = &subpage->iomem;
Avi Kivityac1970f2012-10-03 16:22:53 +02001158 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001159 phys_section_add(&d->map, &subsection));
Avi Kivity0f0cb162012-02-13 17:14:32 +02001160 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02001161 subpage = container_of(existing->mr, subpage_t, iomem);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001162 }
1163 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001164 end = start + int128_get64(section->size) - 1;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001165 subpage_register(subpage, start, end,
1166 phys_section_add(&d->map, section));
Avi Kivity0f0cb162012-02-13 17:14:32 +02001167}
1168
1169
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001170static void register_multipage(AddressSpaceDispatch *d,
1171 MemoryRegionSection *section)
bellard33417e72003-08-10 21:47:01 +00001172{
Avi Kivitya8170e52012-10-23 12:30:10 +02001173 hwaddr start_addr = section->offset_within_address_space;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001174 uint16_t section_index = phys_section_add(&d->map, section);
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001175 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1176 TARGET_PAGE_BITS));
Avi Kivitydd811242012-01-02 12:17:03 +02001177
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001178 assert(num_pages);
1179 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
bellard33417e72003-08-10 21:47:01 +00001180}
1181
Avi Kivityac1970f2012-10-03 16:22:53 +02001182static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001183{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001184 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini00752702013-05-29 12:13:54 +02001185 AddressSpaceDispatch *d = as->next_dispatch;
Paolo Bonzini99b9cc02013-05-27 13:18:01 +02001186 MemoryRegionSection now = *section, remain = *section;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001187 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001188
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001189 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1190 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1191 - now.offset_within_address_space;
1192
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001193 now.size = int128_min(int128_make64(left), now.size);
Avi Kivityac1970f2012-10-03 16:22:53 +02001194 register_subpage(d, &now);
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001195 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001196 now.size = int128_zero();
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001197 }
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001198 while (int128_ne(remain.size, now.size)) {
1199 remain.size = int128_sub(remain.size, now.size);
1200 remain.offset_within_address_space += int128_get64(now.size);
1201 remain.offset_within_region += int128_get64(now.size);
Tyler Hall69b67642012-07-25 18:45:04 -04001202 now = remain;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001203 if (int128_lt(remain.size, page_size)) {
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001204 register_subpage(d, &now);
Hu Tao88266242013-08-29 18:21:16 +08001205 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001206 now.size = page_size;
Avi Kivityac1970f2012-10-03 16:22:53 +02001207 register_subpage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001208 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001209 now.size = int128_and(now.size, int128_neg(page_size));
Avi Kivityac1970f2012-10-03 16:22:53 +02001210 register_multipage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001211 }
Avi Kivity0f0cb162012-02-13 17:14:32 +02001212 }
1213}
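
/*
 * Simplified standalone model of the carve-up above (plain uint64_t in
 * place of Int128, ex_* names local to this sketch): in the usual case an
 * unaligned head and a sub-page tail are registered as subpages, and the
 * page-aligned middle as one multipage run.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define EX_PAGE 4096ull

static void ex_mem_add(uint64_t start, uint64_t size)
{
    uint64_t head = (EX_PAGE - (start & (EX_PAGE - 1))) & (EX_PAGE - 1);

    if (head) {
        head = head < size ? head : size;
        printf("register_subpage   0x%" PRIx64 "+0x%" PRIx64 "\n", start, head);
        start += head;
        size -= head;
    }
    uint64_t mid = size & ~(EX_PAGE - 1);
    if (mid) {
        printf("register_multipage 0x%" PRIx64 "+0x%" PRIx64 "\n", start, mid);
        start += mid;
        size -= mid;
    }
    if (size) {
        printf("register_subpage   0x%" PRIx64 "+0x%" PRIx64 "\n", start, size);
    }
}

int main(void)
{
    ex_mem_add(0x1f80, 0x3100);  /* head 0x80, middle 0x3000, tail 0x80 */
    return 0;
}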
1214
Sheng Yang62a27442010-01-26 19:21:16 +08001215void qemu_flush_coalesced_mmio_buffer(void)
1216{
1217 if (kvm_enabled())
1218 kvm_flush_coalesced_mmio_buffer();
1219}
1220
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001221void qemu_mutex_lock_ramlist(void)
1222{
1223 qemu_mutex_lock(&ram_list.mutex);
1224}
1225
1226void qemu_mutex_unlock_ramlist(void)
1227{
1228 qemu_mutex_unlock(&ram_list.mutex);
1229}
1230
Markus Armbrustere1e84ba2013-07-31 15:11:10 +02001231#ifdef __linux__
Haozhong Zhangd6af99c2016-10-27 12:22:58 +08001232static int64_t get_file_size(int fd)
1233{
1234 int64_t size = lseek(fd, 0, SEEK_END);
1235 if (size < 0) {
1236 return -errno;
1237 }
1238 return size;
1239}
1240
Alex Williamson04b16652010-07-02 11:13:17 -06001241static void *file_ram_alloc(RAMBlock *block,
1242 ram_addr_t memory,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001243 const char *path,
1244 Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001245{
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001246 bool unlink_on_error = false;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001247 char *filename;
Peter Feiner8ca761f2013-03-04 13:54:25 -05001248 char *sanitized_name;
1249 char *c;
Igor Mammedov056b68a2016-07-20 11:54:03 +02001250 void *area = MAP_FAILED;
Paolo Bonzini5c3ece72016-03-17 15:53:13 +01001251 int fd = -1;
Haozhong Zhangd6af99c2016-10-27 12:22:58 +08001252 int64_t file_size;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001253
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001254 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1255 error_setg(errp,
1256 "host lacks kvm mmu notifiers, -mem-path unsupported");
1257 return NULL;
1258 }
1259
1260 for (;;) {
1261 fd = open(path, O_RDWR);
1262 if (fd >= 0) {
1263 /* @path names an existing file, use it */
1264 break;
1265 }
1266 if (errno == ENOENT) {
1267 /* @path names a file that doesn't exist, create it */
1268 fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
1269 if (fd >= 0) {
1270 unlink_on_error = true;
1271 break;
1272 }
1273 } else if (errno == EISDIR) {
1274 /* @path names a directory, create a file there */
1275 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1276 sanitized_name = g_strdup(memory_region_name(block->mr));
1277 for (c = sanitized_name; *c != '\0'; c++) {
1278 if (*c == '/') {
1279 *c = '_';
1280 }
1281 }
1282
1283 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1284 sanitized_name);
1285 g_free(sanitized_name);
1286
1287 fd = mkstemp(filename);
1288 if (fd >= 0) {
1289 unlink(filename);
1290 g_free(filename);
1291 break;
1292 }
1293 g_free(filename);
1294 }
1295 if (errno != EEXIST && errno != EINTR) {
1296 error_setg_errno(errp, errno,
1297 "can't open backing store %s for guest RAM",
1298 path);
1299 goto error;
1300 }
1301 /*
1302 * Try again on EINTR and EEXIST. The latter happens when
1303 * something else creates the file between our two open().
1304 */
1305 }
1306
Dr. David Alan Gilbert863e9622016-09-29 20:09:37 +01001307 block->page_size = qemu_fd_getpagesize(fd);
Haozhong Zhang83606682016-10-24 20:49:37 +08001308 block->mr->align = block->page_size;
1309#if defined(__s390x__)
1310 if (kvm_enabled()) {
1311 block->mr->align = MAX(block->mr->align, QEMU_VMALLOC_ALIGN);
1312 }
1313#endif
Marcelo Tosattic9027602010-03-01 20:25:08 -03001314
Haozhong Zhangd6af99c2016-10-27 12:22:58 +08001315 file_size = get_file_size(fd);
1316
Dr. David Alan Gilbert863e9622016-09-29 20:09:37 +01001317 if (memory < block->page_size) {
Hu Tao557529d2014-09-09 13:28:00 +08001318 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
Dr. David Alan Gilbert863e9622016-09-29 20:09:37 +01001319 "or larger than page size 0x%zx",
1320 memory, block->page_size);
Hu Tao557529d2014-09-09 13:28:00 +08001321 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001322 }
1323
Haozhong Zhang1775f112016-11-02 09:05:51 +08001324 if (file_size > 0 && file_size < memory) {
1325 error_setg(errp, "backing store %s size 0x%" PRIx64
1326 " does not match 'size' option 0x" RAM_ADDR_FMT,
1327 path, file_size, memory);
1328 goto error;
1329 }
1330
Dr. David Alan Gilbert863e9622016-09-29 20:09:37 +01001331 memory = ROUND_UP(memory, block->page_size);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001332
1333 /*
1334 * ftruncate is not supported by hugetlbfs in older
1335 * hosts, so don't bother bailing out on errors.
1336 * If anything goes wrong with it under other filesystems,
1337 * mmap will fail.
Haozhong Zhangd6af99c2016-10-27 12:22:58 +08001338 *
1339 * Do not truncate the non-empty backend file to avoid corrupting
1340 * the existing data in the file. Disabling shrinking is not
1341 * enough. For example, the current vNVDIMM implementation stores
1342 * the guest NVDIMM labels at the end of the backend file. If the
1343 * backend file is later extended, QEMU will not be able to find
1344 * those labels. Therefore, extending the non-empty backend file
1345 * is disabled as well.
Marcelo Tosattic9027602010-03-01 20:25:08 -03001346 */
Haozhong Zhangd6af99c2016-10-27 12:22:58 +08001347 if (!file_size && ftruncate(fd, memory)) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001348 perror("ftruncate");
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001349 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001350
Dominik Dingeld2f39ad2016-04-25 13:55:38 +02001351 area = qemu_ram_mmap(fd, memory, block->mr->align,
1352 block->flags & RAM_SHARED);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001353 if (area == MAP_FAILED) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001354 error_setg_errno(errp, errno,
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001355 "unable to map backing store for guest RAM");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001356 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001357 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001358
1359 if (mem_prealloc) {
Igor Mammedov056b68a2016-07-20 11:54:03 +02001360 os_mem_prealloc(fd, area, memory, errp);
1361 if (errp && *errp) {
1362 goto error;
1363 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001364 }
1365
Alex Williamson04b16652010-07-02 11:13:17 -06001366 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001367 return area;
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001368
1369error:
Igor Mammedov056b68a2016-07-20 11:54:03 +02001370 if (area != MAP_FAILED) {
1371 qemu_ram_munmap(area, memory);
1372 }
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001373 if (unlink_on_error) {
1374 unlink(path);
1375 }
Paolo Bonzini5c3ece72016-03-17 15:53:13 +01001376 if (fd != -1) {
1377 close(fd);
1378 }
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001379 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001380}
1381#endif
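
/*
 * Standalone sketch of the create-or-open loop in file_ram_alloc() (the
 * EISDIR/mkstemp branch is omitted for brevity): open an existing path,
 * create it on ENOENT, and retry when EEXIST or EINTR signals a race
 * between the two open() calls.
 */
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

static int ex_open_backing(const char *path)
{
    for (;;) {
        int fd = open(path, O_RDWR);
        if (fd >= 0) {
            return fd;                         /* existing file */
        }
        if (errno == ENOENT) {
            fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
            if (fd >= 0) {
                return fd;                     /* we created it */
            }
        }
        if (errno != EEXIST && errno != EINTR) {
            return -errno;                     /* hard failure */
        }
        /* EEXIST or EINTR: lost a race, go around again */
    }
}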
1382
Mike Day0dc3f442013-09-05 14:41:35 -04001383/* Called with the ramlist lock held. */
Alex Williamsond17b5282010-06-25 11:08:38 -06001384static ram_addr_t find_ram_offset(ram_addr_t size)
1385{
Alex Williamson04b16652010-07-02 11:13:17 -06001386 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001387 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001388
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001389 assert(size != 0); /* it would hand out same offset multiple times */
1390
Mike Day0dc3f442013-09-05 14:41:35 -04001391 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
Alex Williamson04b16652010-07-02 11:13:17 -06001392 return 0;
Mike Day0d53d9f2015-01-21 13:45:24 +01001393 }
Alex Williamson04b16652010-07-02 11:13:17 -06001394
Mike Day0dc3f442013-09-05 14:41:35 -04001395 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001396 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001397
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001398 end = block->offset + block->max_length;
Alex Williamson04b16652010-07-02 11:13:17 -06001399
Mike Day0dc3f442013-09-05 14:41:35 -04001400 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001401 if (next_block->offset >= end) {
1402 next = MIN(next, next_block->offset);
1403 }
1404 }
1405 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001406 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001407 mingap = next - end;
1408 }
1409 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001410
1411 if (offset == RAM_ADDR_MAX) {
1412 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1413 (uint64_t)size);
1414 abort();
1415 }
1416
Alex Williamson04b16652010-07-02 11:13:17 -06001417 return offset;
1418}
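
/*
 * Toy standalone model of find_ram_offset()'s best-fit search, with a
 * flat array standing in for the RCU block list: for each block end, find
 * the closest following block start, and keep the smallest gap that fits.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct ex_range { uint64_t offset, len; };

static uint64_t ex_find_offset(const struct ex_range *b, int n, uint64_t size)
{
    uint64_t best = UINT64_MAX, mingap = UINT64_MAX;

    for (int i = 0; i < n; i++) {
        uint64_t end = b[i].offset + b[i].len;
        uint64_t next = UINT64_MAX;

        for (int j = 0; j < n; j++) {
            if (b[j].offset >= end && b[j].offset < next) {
                next = b[j].offset;
            }
        }
        if (next - end >= size && next - end < mingap) {
            best = end;
            mingap = next - end;
        }
    }
    return best;   /* UINT64_MAX means no gap was found */
}

int main(void)
{
    struct ex_range blocks[] = {
        { 0x00000, 0x10000 }, { 0x20000, 0x08000 }, { 0x40000, 0x10000 },
    };
    /* gaps: [0x10000,0x20000) and [0x28000,0x40000); the smaller 64K gap
     * still fits 0x4000, so the answer is 0x10000 */
    printf("offset = 0x%" PRIx64 "\n", ex_find_offset(blocks, 3, 0x4000));
    return 0;
}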
1419
Juan Quintela652d7ec2012-07-20 10:37:54 +02001420ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001421{
Alex Williamsond17b5282010-06-25 11:08:38 -06001422 RAMBlock *block;
1423 ram_addr_t last = 0;
1424
Mike Day0dc3f442013-09-05 14:41:35 -04001425 rcu_read_lock();
1426 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001427 last = MAX(last, block->offset + block->max_length);
Mike Day0d53d9f2015-01-21 13:45:24 +01001428 }
Mike Day0dc3f442013-09-05 14:41:35 -04001429 rcu_read_unlock();
Alex Williamsond17b5282010-06-25 11:08:38 -06001430 return last;
1431}
1432
Jason Baronddb97f12012-08-02 15:44:16 -04001433static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1434{
1435 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001436
1437 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
Marcel Apfelbaum47c8ca52015-02-04 17:43:54 +02001438 if (!machine_dump_guest_core(current_machine)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001439 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1440 if (ret) {
1441 perror("qemu_madvise");
1442 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1443 "but dump_guest_core=off specified\n");
1444 }
1445 }
1446}
1447
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001448const char *qemu_ram_get_idstr(RAMBlock *rb)
1449{
1450 return rb->idstr;
1451}
1452
Mike Dayae3a7042013-09-05 14:41:35 -04001453/* Called with iothread lock held. */
Gongleifa53a0e2016-05-10 10:04:59 +08001454void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
Hu Tao20cfe882014-04-02 15:13:26 +08001455{
Gongleifa53a0e2016-05-10 10:04:59 +08001456 RAMBlock *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001457
Avi Kivityc5705a72011-12-20 15:59:12 +02001458 assert(new_block);
1459 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001460
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001461 if (dev) {
1462 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001463 if (id) {
1464 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001465 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001466 }
1467 }
1468 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1469
Gongleiab0a9952016-05-10 10:05:00 +08001470 rcu_read_lock();
Mike Day0dc3f442013-09-05 14:41:35 -04001471 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Gongleifa53a0e2016-05-10 10:04:59 +08001472 if (block != new_block &&
1473 !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001474 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1475 new_block->idstr);
1476 abort();
1477 }
1478 }
Mike Day0dc3f442013-09-05 14:41:35 -04001479 rcu_read_unlock();
Avi Kivityc5705a72011-12-20 15:59:12 +02001480}
1481
Mike Dayae3a7042013-09-05 14:41:35 -04001482/* Called with iothread lock held. */
Gongleifa53a0e2016-05-10 10:04:59 +08001483void qemu_ram_unset_idstr(RAMBlock *block)
Hu Tao20cfe882014-04-02 15:13:26 +08001484{
Mike Dayae3a7042013-09-05 14:41:35 -04001485 /* FIXME: arch_init.c assumes that this is not called throughout
1486 * migration. Ignore the problem since hot-unplug during migration
1487 * does not work anyway.
1488 */
Hu Tao20cfe882014-04-02 15:13:26 +08001489 if (block) {
1490 memset(block->idstr, 0, sizeof(block->idstr));
1491 }
1492}
1493
Dr. David Alan Gilbert863e9622016-09-29 20:09:37 +01001494size_t qemu_ram_pagesize(RAMBlock *rb)
1495{
1496 return rb->page_size;
1497}
1498
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001499static int memory_try_enable_merging(void *addr, size_t len)
1500{
Marcel Apfelbaum75cc7f02015-02-04 17:43:55 +02001501 if (!machine_mem_merge(current_machine)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001502 /* disabled by the user */
1503 return 0;
1504 }
1505
1506 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1507}
1508
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001509/* Only legal before guest might have detected the memory size: e.g. on
1510 * incoming migration, or right after reset.
1511 *
1512 * As memory core doesn't know how is memory accessed, it is up to
1513 * resize callback to update device state and/or add assertions to detect
1514 * misuse, if necessary.
1515 */
Gongleifa53a0e2016-05-10 10:04:59 +08001516int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001517{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001518 assert(block);
1519
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001520 newsize = HOST_PAGE_ALIGN(newsize);
Michael S. Tsirkin129ddaf2015-02-17 10:15:30 +01001521
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001522 if (block->used_length == newsize) {
1523 return 0;
1524 }
1525
1526 if (!(block->flags & RAM_RESIZEABLE)) {
1527 error_setg_errno(errp, EINVAL,
1528 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1529 " in != 0x" RAM_ADDR_FMT, block->idstr,
1530 newsize, block->used_length);
1531 return -EINVAL;
1532 }
1533
1534 if (block->max_length < newsize) {
1535 error_setg_errno(errp, EINVAL,
1536 "Length too large: %s: 0x" RAM_ADDR_FMT
1537 " > 0x" RAM_ADDR_FMT, block->idstr,
1538 newsize, block->max_length);
1539 return -EINVAL;
1540 }
1541
1542 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1543 block->used_length = newsize;
Paolo Bonzini58d27072015-03-23 11:56:01 +01001544 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1545 DIRTY_CLIENTS_ALL);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001546 memory_region_set_size(block->mr, newsize);
1547 if (block->resized) {
1548 block->resized(block->idstr, newsize, block->host);
1549 }
1550 return 0;
1551}
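
/*
 * Hedged usage sketch (assumes QEMU's internal headers; not a standalone
 * program, and example_* names are made up): resizing a RAM_RESIZEABLE
 * block by name, e.g. when incoming migration reports a different
 * used_length.
 */
static int example_resize_by_name(const char *name, ram_addr_t newsize,
                                  Error **errp)
{
    RAMBlock *rb = qemu_ram_block_by_name(name);

    if (!rb) {
        error_setg(errp, "no RAM block named '%s'", name);
        return -EINVAL;
    }
    return qemu_ram_resize(rb, newsize, errp);
}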
1552
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001553/* Called with ram_list.mutex held */
1554static void dirty_memory_extend(ram_addr_t old_ram_size,
1555 ram_addr_t new_ram_size)
1556{
1557 ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
1558 DIRTY_MEMORY_BLOCK_SIZE);
1559 ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
1560 DIRTY_MEMORY_BLOCK_SIZE);
1561 int i;
1562
1563 /* Only need to extend if block count increased */
1564 if (new_num_blocks <= old_num_blocks) {
1565 return;
1566 }
1567
1568 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1569 DirtyMemoryBlocks *old_blocks;
1570 DirtyMemoryBlocks *new_blocks;
1571 int j;
1572
1573 old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
1574 new_blocks = g_malloc(sizeof(*new_blocks) +
1575 sizeof(new_blocks->blocks[0]) * new_num_blocks);
1576
1577 if (old_num_blocks) {
1578 memcpy(new_blocks->blocks, old_blocks->blocks,
1579 old_num_blocks * sizeof(old_blocks->blocks[0]));
1580 }
1581
1582 for (j = old_num_blocks; j < new_num_blocks; j++) {
1583 new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
1584 }
1585
1586 atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
1587
1588 if (old_blocks) {
1589 g_free_rcu(old_blocks, rcu);
1590 }
1591 }
1592}
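
/*
 * Illustrative reader, simplified from cpu_physical_memory_get_dirty() in
 * include/exec/ram_addr.h: lookups run under rcu_read_lock() with a
 * single atomic_rcu_read(), which is why dirty_memory_extend() must
 * publish a fully copied array rather than grow the old one in place.
 */
static bool example_test_dirty(ram_addr_t addr, unsigned client)
{
    unsigned long page = addr >> TARGET_PAGE_BITS;
    DirtyMemoryBlocks *blocks;
    bool dirty;

    rcu_read_lock();
    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
    dirty = test_bit(page % DIRTY_MEMORY_BLOCK_SIZE,
                     blocks->blocks[page / DIRTY_MEMORY_BLOCK_SIZE]);
    rcu_read_unlock();
    return dirty;
}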
1593
Fam Zheng528f46a2016-03-01 14:18:18 +08001594static void ram_block_add(RAMBlock *new_block, Error **errp)
Avi Kivityc5705a72011-12-20 15:59:12 +02001595{
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001596 RAMBlock *block;
Mike Day0d53d9f2015-01-21 13:45:24 +01001597 RAMBlock *last_block = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001598 ram_addr_t old_ram_size, new_ram_size;
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001599 Error *err = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001600
1601 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001602
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001603 qemu_mutex_lock_ramlist();
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001604 new_block->offset = find_ram_offset(new_block->max_length);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001605
1606 if (!new_block->host) {
1607 if (xen_enabled()) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001608 xen_ram_alloc(new_block->offset, new_block->max_length,
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001609 new_block->mr, &err);
1610 if (err) {
1611 error_propagate(errp, err);
1612 qemu_mutex_unlock_ramlist();
Paolo Bonzini39c350e2016-03-09 18:14:01 +01001613 return;
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001614 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001615 } else {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001616 new_block->host = phys_mem_alloc(new_block->max_length,
Igor Mammedova2b257d2014-10-31 16:38:37 +00001617 &new_block->mr->align);
Markus Armbruster39228252013-07-31 15:11:11 +02001618 if (!new_block->host) {
Hu Taoef701d72014-09-09 13:27:54 +08001619 error_setg_errno(errp, errno,
1620 "cannot set up guest memory '%s'",
1621 memory_region_name(new_block->mr));
1622 qemu_mutex_unlock_ramlist();
Paolo Bonzini39c350e2016-03-09 18:14:01 +01001623 return;
Markus Armbruster39228252013-07-31 15:11:11 +02001624 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001625 memory_try_enable_merging(new_block->host, new_block->max_length);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001626 }
1627 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001628
Li Zhijiandd631692015-07-02 20:18:06 +08001629 new_ram_size = MAX(old_ram_size,
1630 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1631 if (new_ram_size > old_ram_size) {
1632 migration_bitmap_extend(old_ram_size, new_ram_size);
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001633 dirty_memory_extend(old_ram_size, new_ram_size);
Li Zhijiandd631692015-07-02 20:18:06 +08001634 }
Mike Day0d53d9f2015-01-21 13:45:24 +01001635 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1636 * QLIST (which has an RCU-friendly variant) does not have insertion at
1637 * tail, so save the last element in last_block.
1638 */
Mike Day0dc3f442013-09-05 14:41:35 -04001639 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Mike Day0d53d9f2015-01-21 13:45:24 +01001640 last_block = block;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001641 if (block->max_length < new_block->max_length) {
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001642 break;
1643 }
1644 }
1645 if (block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001646 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001647 } else if (last_block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001648 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001649 } else { /* list is empty */
Mike Day0dc3f442013-09-05 14:41:35 -04001650 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001651 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001652 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001653
Mike Day0dc3f442013-09-05 14:41:35 -04001654 /* Write list before version */
1655 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001656 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001657 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001658
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001659 cpu_physical_memory_set_dirty_range(new_block->offset,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001660 new_block->used_length,
1661 DIRTY_CLIENTS_ALL);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001662
Paolo Bonzinia904c912015-01-21 16:18:35 +01001663 if (new_block->host) {
1664 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1665 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
Cao jinc2cd6272016-09-12 14:34:56 +08001666 /* MADV_DONTFORK is also needed by KVM in absence of synchronous MMU */
Paolo Bonzinia904c912015-01-21 16:18:35 +01001667 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001668 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001669}
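
/*
 * Toy model (plain singly linked list, ex_* names local to this sketch)
 * of the ordering invariant above: blocks stay sorted from biggest to
 * smallest so lookups scan the likeliest block first.  A pointer-to-
 * pointer walk stands in for QLIST_INSERT_BEFORE_RCU/_AFTER_RCU and the
 * last_block bookkeeping that QLIST's lack of tail insertion forces.
 */
#include <stddef.h>

struct ex_sized_block { unsigned long max_length; struct ex_sized_block *next; };

static void ex_insert_by_size(struct ex_sized_block **head,
                              struct ex_sized_block *nb)
{
    struct ex_sized_block **p = head;

    while (*p && (*p)->max_length >= nb->max_length) {
        p = &(*p)->next;     /* skip blocks at least as large */
    }
    nb->next = *p;
    *p = nb;
}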
1670
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001671#ifdef __linux__
Fam Zheng528f46a2016-03-01 14:18:18 +08001672RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
1673 bool share, const char *mem_path,
1674 Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001675{
1676 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001677 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001678
1679 if (xen_enabled()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001680 error_setg(errp, "-mem-path not supported with Xen");
Fam Zheng528f46a2016-03-01 14:18:18 +08001681 return NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001682 }
1683
1684 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1685 /*
1686 * file_ram_alloc() needs to allocate just like
1687 * phys_mem_alloc, but we haven't bothered to provide
1688 * a hook there.
1689 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001690 error_setg(errp,
1691 "-mem-path not supported with this accelerator");
Fam Zheng528f46a2016-03-01 14:18:18 +08001692 return NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001693 }
1694
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001695 size = HOST_PAGE_ALIGN(size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001696 new_block = g_malloc0(sizeof(*new_block));
1697 new_block->mr = mr;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001698 new_block->used_length = size;
1699 new_block->max_length = size;
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001700 new_block->flags = share ? RAM_SHARED : 0;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001701 new_block->host = file_ram_alloc(new_block, size,
1702 mem_path, errp);
1703 if (!new_block->host) {
1704 g_free(new_block);
Fam Zheng528f46a2016-03-01 14:18:18 +08001705 return NULL;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001706 }
1707
Fam Zheng528f46a2016-03-01 14:18:18 +08001708 ram_block_add(new_block, &local_err);
Hu Taoef701d72014-09-09 13:27:54 +08001709 if (local_err) {
1710 g_free(new_block);
1711 error_propagate(errp, local_err);
Fam Zheng528f46a2016-03-01 14:18:18 +08001712 return NULL;
Hu Taoef701d72014-09-09 13:27:54 +08001713 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001714 return new_block;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001715}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001716#endif
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001717
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001718static
Fam Zheng528f46a2016-03-01 14:18:18 +08001719RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1720 void (*resized)(const char*,
1721 uint64_t length,
1722 void *host),
1723 void *host, bool resizeable,
1724 MemoryRegion *mr, Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001725{
1726 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001727 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001728
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001729 size = HOST_PAGE_ALIGN(size);
1730 max_size = HOST_PAGE_ALIGN(max_size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001731 new_block = g_malloc0(sizeof(*new_block));
1732 new_block->mr = mr;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001733 new_block->resized = resized;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001734 new_block->used_length = size;
1735 new_block->max_length = max_size;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001736 assert(max_size >= size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001737 new_block->fd = -1;
Dr. David Alan Gilbert863e9622016-09-29 20:09:37 +01001738 new_block->page_size = getpagesize();
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001739 new_block->host = host;
1740 if (host) {
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001741 new_block->flags |= RAM_PREALLOC;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001742 }
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001743 if (resizeable) {
1744 new_block->flags |= RAM_RESIZEABLE;
1745 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001746 ram_block_add(new_block, &local_err);
Hu Taoef701d72014-09-09 13:27:54 +08001747 if (local_err) {
1748 g_free(new_block);
1749 error_propagate(errp, local_err);
Fam Zheng528f46a2016-03-01 14:18:18 +08001750 return NULL;
Hu Taoef701d72014-09-09 13:27:54 +08001751 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001752 return new_block;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001753}
1754
Fam Zheng528f46a2016-03-01 14:18:18 +08001755RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001756 MemoryRegion *mr, Error **errp)
1757{
1758 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1759}
1760
Fam Zheng528f46a2016-03-01 14:18:18 +08001761RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
pbrook94a6b542009-04-11 17:15:54 +00001762{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001763 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1764}
1765
Fam Zheng528f46a2016-03-01 14:18:18 +08001766RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001767 void (*resized)(const char*,
1768 uint64_t length,
1769 void *host),
1770 MemoryRegion *mr, Error **errp)
1771{
1772 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
pbrook94a6b542009-04-11 17:15:54 +00001773}
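
/*
 * Hedged usage sketch (QEMU-internal types assumed; example_* names made
 * up): the resized callback fires from qemu_ram_resize() after
 * used_length changes.  Real callers normally reach this allocator via
 * memory_region_init_resizeable_ram() rather than calling it directly.
 */
static void example_resized(const char *id, uint64_t length, void *host)
{
    fprintf(stderr, "RAM block %s now %" PRIu64 " bytes at %p\n",
            id, length, host);
}

static RAMBlock *example_alloc_growable(MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_resizeable(16 * 1024 * 1024,   /* used now */
                                     64 * 1024 * 1024,   /* hard cap */
                                     example_resized, mr, errp);
}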
bellarde9a1ab12007-02-08 23:08:38 +00001774
Paolo Bonzini43771532013-09-09 17:58:40 +02001775static void reclaim_ramblock(RAMBlock *block)
1776{
1777 if (block->flags & RAM_PREALLOC) {
1778 ;
1779 } else if (xen_enabled()) {
1780 xen_invalidate_map_cache_entry(block->host);
1781#ifndef _WIN32
1782 } else if (block->fd >= 0) {
Eduardo Habkost2f3a2bb2015-11-06 20:11:21 -02001783 qemu_ram_munmap(block->host, block->max_length);
Paolo Bonzini43771532013-09-09 17:58:40 +02001784 close(block->fd);
1785#endif
1786 } else {
1787 qemu_anon_ram_free(block->host, block->max_length);
1788 }
1789 g_free(block);
1790}
1791
Fam Zhengf1060c52016-03-01 14:18:22 +08001792void qemu_ram_free(RAMBlock *block)
bellarde9a1ab12007-02-08 23:08:38 +00001793{
Marc-André Lureau85bc2a12016-03-29 13:20:51 +02001794 if (!block) {
1795 return;
1796 }
1797
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001798 qemu_mutex_lock_ramlist();
Fam Zhengf1060c52016-03-01 14:18:22 +08001799 QLIST_REMOVE_RCU(block, next);
1800 ram_list.mru_block = NULL;
1801 /* Write list before version */
1802 smp_wmb();
1803 ram_list.version++;
1804 call_rcu(block, reclaim_ramblock, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001805 qemu_mutex_unlock_ramlist();
bellarde9a1ab12007-02-08 23:08:38 +00001806}
1807
Huang Yingcd19cfa2011-03-02 08:56:19 +01001808#ifndef _WIN32
1809void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1810{
1811 RAMBlock *block;
1812 ram_addr_t offset;
1813 int flags;
1814 void *area, *vaddr;
1815
Mike Day0dc3f442013-09-05 14:41:35 -04001816 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001817 offset = addr - block->offset;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001818 if (offset < block->max_length) {
Michael S. Tsirkin1240be22014-11-12 11:44:41 +02001819 vaddr = ramblock_ptr(block, offset);
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001820 if (block->flags & RAM_PREALLOC) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001821 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001822 } else if (xen_enabled()) {
1823 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001824 } else {
1825 flags = MAP_FIXED;
Markus Armbruster3435f392013-07-31 15:11:07 +02001826 if (block->fd >= 0) {
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001827 flags |= (block->flags & RAM_SHARED ?
1828 MAP_SHARED : MAP_PRIVATE);
Markus Armbruster3435f392013-07-31 15:11:07 +02001829 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1830 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001831 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001832 /*
1833 * Remap needs to match alloc. Accelerators that
1834 * set phys_mem_alloc never remap. If they did,
1835 * we'd need a remap hook here.
1836 */
1837 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1838
Huang Yingcd19cfa2011-03-02 08:56:19 +01001839 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1840 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1841 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001842 }
1843 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001844 fprintf(stderr, "Could not remap addr: "
1845 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001846 length, addr);
1847 exit(1);
1848 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001849 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001850 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001851 }
Huang Yingcd19cfa2011-03-02 08:56:19 +01001852 }
1853 }
1854}
1855#endif /* !_WIN32 */
1856
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001857/* Return a host pointer to ram allocated with qemu_ram_alloc.
Mike Dayae3a7042013-09-05 14:41:35 -04001858 * This should not be used for general purpose DMA. Use address_space_map
1859 * or address_space_rw instead. For local memory (e.g. video ram) that the
1860 * device owns, use memory_region_get_ram_ptr.
Mike Day0dc3f442013-09-05 14:41:35 -04001861 *
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001862 * Called within RCU critical section.
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001863 */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001864void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001865{
Gonglei3655cb92016-02-20 10:35:20 +08001866 RAMBlock *block = ram_block;
1867
1868 if (block == NULL) {
1869 block = qemu_get_ram_block(addr);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001870 addr -= block->offset;
Gonglei3655cb92016-02-20 10:35:20 +08001871 }
Mike Dayae3a7042013-09-05 14:41:35 -04001872
1873 if (xen_enabled() && block->host == NULL) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001874 /* We need to check if the requested address is in the RAM
1875 * because we don't want to map the entire memory in QEMU.
1876 * In that case just map until the end of the page.
1877 */
1878 if (block->offset == 0) {
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001879 return xen_map_cache(addr, 0, 0);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001880 }
Mike Dayae3a7042013-09-05 14:41:35 -04001881
1882 block->host = xen_map_cache(block->offset, block->max_length, 1);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001883 }
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001884 return ramblock_ptr(block, addr);
pbrookdc828ca2009-04-09 22:21:07 +00001885}
1886
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001887/* Return a host pointer to guest's ram. Similar to qemu_map_ram_ptr
Mike Dayae3a7042013-09-05 14:41:35 -04001888 * but takes a size argument.
Mike Day0dc3f442013-09-05 14:41:35 -04001889 *
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001890 * Called within RCU critical section.
Mike Dayae3a7042013-09-05 14:41:35 -04001891 */
Gonglei3655cb92016-02-20 10:35:20 +08001892static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
1893 hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001894{
Gonglei3655cb92016-02-20 10:35:20 +08001895 RAMBlock *block = ram_block;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001896 if (*size == 0) {
1897 return NULL;
1898 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001899
Gonglei3655cb92016-02-20 10:35:20 +08001900 if (block == NULL) {
1901 block = qemu_get_ram_block(addr);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001902 addr -= block->offset;
Gonglei3655cb92016-02-20 10:35:20 +08001903 }
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001904 *size = MIN(*size, block->max_length - addr);
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001905
1906 if (xen_enabled() && block->host == NULL) {
1907 /* We need to check if the requested address is in the RAM
1908 * because we don't want to map the entire memory in QEMU.
1909 * In that case just map the requested area.
1910 */
1911 if (block->offset == 0) {
1912 return xen_map_cache(addr, *size, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001913 }
1914
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001915 block->host = xen_map_cache(block->offset, block->max_length, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001916 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001917
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001918 return ramblock_ptr(block, addr);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001919}
1920
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001921/*
1922 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1923 * in that RAMBlock.
1924 *
1925 * ptr: Host pointer to look up
1926 * round_offset: If true round the result offset down to a page boundary
1927 * *ram_addr: set to result ram_addr
1928 * *offset: set to result offset within the RAMBlock
1929 *
1930 * Returns: RAMBlock (or NULL if not found)
Mike Dayae3a7042013-09-05 14:41:35 -04001931 *
1932 * By the time this function returns, the returned pointer is not protected
1933 * by RCU anymore. If the caller is not within an RCU critical section and
1934 * does not hold the iothread lock, it must have other means of protecting the
1935 * pointer, such as a reference to the region that includes the incoming
1936 * ram_addr_t.
1937 */
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001938RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001939 ram_addr_t *offset)
pbrook5579c7f2009-04-11 14:47:08 +00001940{
pbrook94a6b542009-04-11 17:15:54 +00001941 RAMBlock *block;
1942 uint8_t *host = ptr;
1943
Jan Kiszka868bb332011-06-21 22:59:09 +02001944 if (xen_enabled()) {
Paolo Bonzinif615f392016-05-26 10:07:50 +02001945 ram_addr_t ram_addr;
Mike Day0dc3f442013-09-05 14:41:35 -04001946 rcu_read_lock();
Paolo Bonzinif615f392016-05-26 10:07:50 +02001947 ram_addr = xen_ram_addr_from_mapcache(ptr);
1948 block = qemu_get_ram_block(ram_addr);
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001949 if (block) {
Anthony PERARDd6b6aec2016-06-09 16:56:17 +01001950 *offset = ram_addr - block->offset;
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001951 }
Mike Day0dc3f442013-09-05 14:41:35 -04001952 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001953 return block;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001954 }
1955
Mike Day0dc3f442013-09-05 14:41:35 -04001956 rcu_read_lock();
1957 block = atomic_rcu_read(&ram_list.mru_block);
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001958 if (block && block->host && host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001959 goto found;
1960 }
1961
Mike Day0dc3f442013-09-05 14:41:35 -04001962 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001963 /* This can happen when the block is not mapped. */
1964 if (block->host == NULL) {
1965 continue;
1966 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001967 if (host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001968 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001969 }
pbrook94a6b542009-04-11 17:15:54 +00001970 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001971
Mike Day0dc3f442013-09-05 14:41:35 -04001972 rcu_read_unlock();
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001973 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001974
1975found:
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001976 *offset = (host - block->host);
1977 if (round_offset) {
1978 *offset &= TARGET_PAGE_MASK;
1979 }
Mike Day0dc3f442013-09-05 14:41:35 -04001980 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001981 return block;
1982}
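
/*
 * Hedged usage sketch (example_* name made up): turning a host pointer,
 * say the haddr out of a TLB entry, back into (RAMBlock, offset).  This
 * is the building block that qemu_ram_addr_from_host() below wraps.
 */
static void example_describe_ptr(void *host_ptr)
{
    ram_addr_t offset;
    RAMBlock *rb = qemu_ram_block_from_host(host_ptr, true, &offset);

    if (rb) {
        fprintf(stderr, "%p is %s+0x" RAM_ADDR_FMT "\n",
                host_ptr, qemu_ram_get_idstr(rb), offset);
    }
}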
1983
Dr. David Alan Gilberte3dd7492015-11-05 18:10:33 +00001984/*
1985 * Finds the named RAMBlock
1986 *
1987 * name: The name of RAMBlock to find
1988 *
1989 * Returns: RAMBlock (or NULL if not found)
1990 */
1991RAMBlock *qemu_ram_block_by_name(const char *name)
1992{
1993 RAMBlock *block;
1994
1995 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1996 if (!strcmp(name, block->idstr)) {
1997 return block;
1998 }
1999 }
2000
2001 return NULL;
2002}
2003
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00002004/* Some of the softmmu routines need to translate from a host pointer
2005 (typically a TLB entry) back to a ram offset. */
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01002006ram_addr_t qemu_ram_addr_from_host(void *ptr)
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00002007{
2008 RAMBlock *block;
Paolo Bonzinif615f392016-05-26 10:07:50 +02002009 ram_addr_t offset;
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00002010
Paolo Bonzinif615f392016-05-26 10:07:50 +02002011 block = qemu_ram_block_from_host(ptr, false, &offset);
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00002012 if (!block) {
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01002013 return RAM_ADDR_INVALID;
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00002014 }
2015
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01002016 return block->offset + offset;
Marcelo Tosattie8902612010-10-11 15:31:19 -03002017}
Alex Williamsonf471a172010-06-11 11:11:42 -06002018
Paolo Bonzini49b24af2015-12-16 10:30:47 +01002019/* Called within RCU critical section. */
Avi Kivitya8170e52012-10-23 12:30:10 +02002020static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002021 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00002022{
Alex Bennéeba051fb2016-10-27 16:10:16 +01002023 bool locked = false;
2024
Juan Quintela52159192013-10-08 12:44:04 +02002025 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Alex Bennéeba051fb2016-10-27 16:10:16 +01002026 locked = true;
2027 tb_lock();
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002028 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00002029 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002030 switch (size) {
2031 case 1:
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002032 stb_p(qemu_map_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002033 break;
2034 case 2:
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002035 stw_p(qemu_map_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002036 break;
2037 case 4:
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002038 stl_p(qemu_map_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002039 break;
2040 default:
2041 abort();
2042 }
Alex Bennéeba051fb2016-10-27 16:10:16 +01002043
2044 if (locked) {
2045 tb_unlock();
2046 }
2047
Paolo Bonzini58d27072015-03-23 11:56:01 +01002048 /* Set both VGA and migration bits for simplicity and to remove
2049 * the notdirty callback faster.
2050 */
2051 cpu_physical_memory_set_dirty_range(ram_addr, size,
2052 DIRTY_CLIENTS_NOCODE);
bellardf23db162005-08-21 19:12:28 +00002053 /* we remove the notdirty callback only if the code has been
2054 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02002055 if (!cpu_physical_memory_is_clean(ram_addr)) {
Peter Crosthwaitebcae01e2015-09-10 22:39:42 -07002056 tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
Andreas Färber4917cf42013-05-27 05:17:50 +02002057 }
bellard1ccde1c2004-02-06 19:46:14 +00002058}
2059
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002060static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
2061 unsigned size, bool is_write)
2062{
2063 return is_write;
2064}
2065
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002066static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002067 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002068 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002069 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00002070};
2071
pbrook0f459d12008-06-09 00:20:13 +00002072/* Generate a debug exception if a watchpoint has been hit. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002073static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
pbrook0f459d12008-06-09 00:20:13 +00002074{
Andreas Färber93afead2013-08-26 03:41:01 +02002075 CPUState *cpu = current_cpu;
Sergey Fedorov568496c2016-02-11 11:17:32 +00002076 CPUClass *cc = CPU_GET_CLASS(cpu);
Andreas Färber93afead2013-08-26 03:41:01 +02002077 CPUArchState *env = cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00002078 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00002079 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00002080 CPUWatchpoint *wp;
Emilio G. Cota89fee742016-04-07 13:19:22 -04002081 uint32_t cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00002082
Andreas Färberff4700b2013-08-26 18:23:18 +02002083 if (cpu->watchpoint_hit) {
aliguori06d55cc2008-11-18 20:24:06 +00002084 /* We re-entered the check after replacing the TB. Now raise
 2085 * the debug interrupt so that it will trigger after the
2086 * current instruction. */
Andreas Färber93afead2013-08-26 03:41:01 +02002087 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00002088 return;
2089 }
Andreas Färber93afead2013-08-26 03:41:01 +02002090 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Andreas Färberff4700b2013-08-26 18:23:18 +02002091 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Peter Maydell05068c02014-09-12 14:06:48 +01002092 if (cpu_watchpoint_address_matches(wp, vaddr, len)
2093 && (wp->flags & flags)) {
Peter Maydell08225672014-09-12 14:06:48 +01002094 if (flags == BP_MEM_READ) {
2095 wp->flags |= BP_WATCHPOINT_HIT_READ;
2096 } else {
2097 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
2098 }
2099 wp->hitaddr = vaddr;
Peter Maydell66b9b432015-04-26 16:49:24 +01002100 wp->hitattrs = attrs;
Andreas Färberff4700b2013-08-26 18:23:18 +02002101 if (!cpu->watchpoint_hit) {
Sergey Fedorov568496c2016-02-11 11:17:32 +00002102 if (wp->flags & BP_CPU &&
2103 !cc->debug_check_watchpoint(cpu, wp)) {
2104 wp->flags &= ~BP_WATCHPOINT_HIT;
2105 continue;
2106 }
Andreas Färberff4700b2013-08-26 18:23:18 +02002107 cpu->watchpoint_hit = wp;
KONRAD Frederica5e99822016-10-27 16:10:06 +01002108
2109 /* The tb_lock will be reset when cpu_loop_exit or
2110 * cpu_loop_exit_noexc longjmp back into the cpu_exec
2111 * main loop.
2112 */
2113 tb_lock();
Andreas Färber239c51a2013-09-01 17:12:23 +02002114 tb_check_watchpoint(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002115 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
Andreas Färber27103422013-08-26 08:31:06 +02002116 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +02002117 cpu_loop_exit(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002118 } else {
2119 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
Andreas Färber648f0342013-09-01 17:43:17 +02002120 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
Peter Maydell6886b982016-05-17 15:18:04 +01002121 cpu_loop_exit_noexc(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002122 }
aliguori06d55cc2008-11-18 20:24:06 +00002123 }
aliguori6e140f22008-11-18 20:37:55 +00002124 } else {
2125 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00002126 }
2127 }
2128}
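
/*
 * Hedged sketch: check_watchpoint() only fires for watchpoints that were
 * registered beforehand, e.g. by the gdbstub.  Assuming the usual
 * cpu_watchpoint_insert() API (the example_* wrapper is made up), a
 * 4-byte write watch looks like this:
 */
static int example_watch_writes(CPUState *cpu, vaddr addr, Error **errp)
{
    int ret = cpu_watchpoint_insert(cpu, addr, 4, BP_MEM_WRITE | BP_GDB,
                                    NULL);
    if (ret) {
        error_setg_errno(errp, -ret, "cannot insert watchpoint");
    }
    return ret;
}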
2129
pbrook6658ffb2007-03-16 23:58:11 +00002130/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2131 so these check for a hit then pass through to the normal out-of-line
2132 phys routines. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002133static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
2134 unsigned size, MemTxAttrs attrs)
pbrook6658ffb2007-03-16 23:58:11 +00002135{
Peter Maydell66b9b432015-04-26 16:49:24 +01002136 MemTxResult res;
2137 uint64_t data;
Peter Maydell79ed0412016-01-21 14:15:06 +00002138 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2139 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
pbrook6658ffb2007-03-16 23:58:11 +00002140
Peter Maydell66b9b432015-04-26 16:49:24 +01002141 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
Avi Kivity1ec9b902012-01-02 12:47:48 +02002142 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04002143 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002144 data = address_space_ldub(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002145 break;
2146 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002147 data = address_space_lduw(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002148 break;
2149 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002150 data = address_space_ldl(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002151 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02002152 default: abort();
2153 }
Peter Maydell66b9b432015-04-26 16:49:24 +01002154 *pdata = data;
2155 return res;
2156}
2157
2158static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
2159 uint64_t val, unsigned size,
2160 MemTxAttrs attrs)
2161{
2162 MemTxResult res;
Peter Maydell79ed0412016-01-21 14:15:06 +00002163 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2164 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
Peter Maydell66b9b432015-04-26 16:49:24 +01002165
2166 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
2167 switch (size) {
2168 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002169 address_space_stb(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002170 break;
2171 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002172 address_space_stw(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002173 break;
2174 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002175 address_space_stl(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002176 break;
2177 default: abort();
2178 }
2179 return res;
pbrook6658ffb2007-03-16 23:58:11 +00002180}
2181
Avi Kivity1ec9b902012-01-02 12:47:48 +02002182static const MemoryRegionOps watch_mem_ops = {
Peter Maydell66b9b432015-04-26 16:49:24 +01002183 .read_with_attrs = watch_mem_read,
2184 .write_with_attrs = watch_mem_write,
Avi Kivity1ec9b902012-01-02 12:47:48 +02002185 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00002186};
pbrook6658ffb2007-03-16 23:58:11 +00002187
Peter Maydellf25a49e2015-04-26 16:49:24 +01002188static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
                            unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];
    MemTxResult res;

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
           subpage, len, addr);
#endif
    res = address_space_read(subpage->as, addr + subpage->base,
                             attrs, buf, len);
    if (res) {
        return res;
    }
    switch (len) {
    case 1:
        *data = ldub_p(buf);
        return MEMTX_OK;
    case 2:
        *data = lduw_p(buf);
        return MEMTX_OK;
    case 4:
        *data = ldl_p(buf);
        return MEMTX_OK;
    case 8:
        *data = ldq_p(buf);
        return MEMTX_OK;
    default:
        abort();
    }
}

static MemTxResult subpage_write(void *opaque, hwaddr addr,
                                 uint64_t value, unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    case 8:
        stq_p(buf, value);
        break;
    default:
        abort();
    }
    return address_space_write(subpage->as, addr + subpage->base,
                               attrs, buf, len);
}

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      len, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read_with_attrs = subpage_read,
    .write_with_attrs = subpage_write,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

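/*
 * Illustrative note (a reading of the ops above, not a stated contract):
 * a page shared by several regions is backed by a subpage MemoryRegion,
 * and every access bounces through subpage_read()/subpage_write(), which
 * replay it against the owning AddressSpace at the page's base.  Roughly:
 *
 *     subpage_read(subpage, 0x10, &data, 4, attrs)
 *         == address_space_read(subpage->as, subpage->base + 0x10,
 *                               attrs, buf, 4)   // plus ldl_p(buf)
 */
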
static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) {
        return -1;
    }
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t) + TARGET_PAGE_SIZE * sizeof(uint16_t));
    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          NULL, TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}

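/*
 * Layout assumption behind the g_malloc0() above: subpage_t is allocated
 * together with one uint16_t section index per sub-page granule, and
 * every slot starts out as PHYS_SECTION_UNASSIGNED until
 * subpage_register() points a sub-range at a real section.
 */
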
static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
                              MemoryRegion *mr)
{
    assert(as);
    MemoryRegionSection section = {
        .address_space = as,
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(map, &section);
}

MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
{
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
    AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
    MemoryRegionSection *sections = d->map.sections;

    return sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          NULL, UINT64_MAX);
}

static void mem_begin(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
    uint16_t n;

    n = dummy_section(&d->map, as, &io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&d->map, as, &io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&d->map, as, &io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&d->map, as, &io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
    d->as = as;
    as->next_dispatch = d;
}

static void address_space_dispatch_free(AddressSpaceDispatch *d)
{
    phys_sections_free(&d->map);
    g_free(d);
}

static void mem_commit(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    phys_page_compact_all(next, next->map.nodes_nb);

    atomic_rcu_set(&as->dispatch, next);
    if (cur) {
        call_rcu(cur, address_space_dispatch_free, rcu);
    }
}

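/*
 * A sketch of the RCU pattern implemented by mem_begin()/mem_commit()
 * above (descriptive only):
 *
 *     d = g_new0(AddressSpaceDispatch, 1);       // mem_begin: build
 *     ...populate d while readers still use the old table...
 *     atomic_rcu_set(&as->dispatch, d);          // mem_commit: publish
 *     call_rcu(cur, address_space_dispatch_free, rcu);  // reclaim later
 *
 * Readers that fetch as->dispatch inside an RCU critical section always
 * see either the complete old table or the complete new one, never a
 * half-built map.
 */
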
static void tcg_commit(MemoryListener *listener)
{
    CPUAddressSpace *cpuas;
    AddressSpaceDispatch *d;

    /* Since each CPU stores RAM addresses in its TLB cache, we must
       reset the modified entries */
    cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
    cpu_reloading_memory_map();
    /* The CPU and TLB are protected by the iothread lock.
     * We reload the dispatch pointer now because cpu_reloading_memory_map()
     * may have split the RCU critical section.
     */
    d = atomic_rcu_read(&cpuas->as->dispatch);
    atomic_rcu_set(&cpuas->memory_dispatch, d);
    tlb_flush(cpuas->cpu, 1);
}

void address_space_init_dispatch(AddressSpace *as)
{
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .begin = mem_begin,
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    memory_listener_register(&as->dispatch_listener, as);
}

void address_space_unregister(AddressSpace *as)
{
    memory_listener_unregister(&as->dispatch_listener);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    atomic_rcu_set(&as->dispatch, NULL);
    if (d) {
        call_rcu(d, address_space_dispatch_free, rcu);
    }
}

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
                          65536);
    address_space_init(&address_space_io, system_io, "I/O");
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID)) {
            return -1;
        }
        if (is_write) {
            if (!(flags & PAGE_WRITE)) {
                return -1;
            }
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0))) {
                return -1;
            }
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ)) {
                return -1;
            }
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1))) {
                return -1;
            }
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else

static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
                                     hwaddr length)
{
    uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    addr += memory_region_get_ram_addr(mr);

    /* No early return if dirty_log_mask is or becomes 0, because
     * cpu_physical_memory_set_dirty_range will still call
     * xen_modified_memory.
     */
    if (dirty_log_mask) {
        dirty_log_mask =
            cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
    }
    if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
        tb_lock();
        tb_invalidate_phys_range(addr, addr + length);
        tb_unlock();
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
    }
    cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
}

static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    l = pow2floor(l);

    return l;
}

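/*
 * A worked example of the clamping above (values assumed for
 * illustration): for a region with valid.max_access_size == 4 and
 * impl.unaligned false, a 7-byte request at addr 0x1002 sees
 * "addr & -addr" == 2, so the alignment bound lowers the maximum to 2;
 * l is then clamped from 7 to 2 and pow2floor() leaves it at 2.  The
 * caller loops, advancing addr, until the full request is consumed.
 */
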
static bool prepare_mmio_access(MemoryRegion *mr)
{
    bool unlocked = !qemu_mutex_iothread_locked();
    bool release_lock = false;

    if (unlocked && mr->global_locking) {
        qemu_mutex_lock_iothread();
        unlocked = false;
        release_lock = true;
    }
    if (mr->flush_coalesced_mmio) {
        if (unlocked) {
            qemu_mutex_lock_iothread();
        }
        qemu_flush_coalesced_mmio_buffer();
        if (unlocked) {
            qemu_mutex_unlock_iothread();
        }
    }

    return release_lock;
}

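/*
 * Descriptive sketch of the contract above; callers follow the pattern
 *
 *     release_lock |= prepare_mmio_access(mr);
 *     ...dispatch the MMIO access...
 *     if (release_lock) {
 *         qemu_mutex_unlock_iothread();
 *     }
 *
 * so the iothread lock is taken only for regions that still depend on
 * the global lock, and is dropped by whoever took it.
 */
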
/* Called within RCU critical section.  */
static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
                                                MemTxAttrs attrs,
                                                const uint8_t *buf,
                                                int len, hwaddr addr1,
                                                hwaddr l, MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, true)) {
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            /* XXX: could force current_cpu to NULL to avoid
               potential bugs */
            switch (l) {
            case 8:
                /* 64 bit write access */
                val = ldq_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 8,
                                                       attrs);
                break;
            case 4:
                /* 32 bit write access */
                val = ldl_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 4,
                                                       attrs);
                break;
            case 2:
                /* 16 bit write access */
                val = lduw_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 2,
                                                       attrs);
                break;
            case 1:
                /* 8 bit write access */
                val = ldub_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 1,
                                                       attrs);
                break;
            default:
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(mr, addr1, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
    }

    return result;
}

MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                                const uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
        result = address_space_write_continue(as, addr, attrs, buf, len,
                                              addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}

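/*
 * Descriptive note (a reading of the code above, not a stated contract):
 * address_space_write() does the first translation and opens the RCU
 * critical section itself, then hands off to the _continue helper, which
 * loops over as many MemoryRegions as the transfer spans.  The read path
 * below is split the same way, which lets a caller that has already
 * translated the first fragment enter at the _continue step, provided it
 * holds the RCU read lock.
 */
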
/* Called within RCU critical section.  */
MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
                                        MemTxAttrs attrs, uint8_t *buf,
                                        int len, hwaddr addr1, hwaddr l,
                                        MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, false)) {
            /* I/O case */
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            switch (l) {
            case 8:
                /* 64 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 8,
                                                      attrs);
                stq_p(buf, val);
                break;
            case 4:
                /* 32 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 4,
                                                      attrs);
                stl_p(buf, val);
                break;
            case 2:
                /* 16 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 2,
                                                      attrs);
                stw_p(buf, val);
                break;
            case 1:
                /* 8 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 1,
                                                      attrs);
                stb_p(buf, val);
                break;
            default:
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
            memcpy(buf, ptr, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
    }

    return result;
}

MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs, uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
        result = address_space_read_continue(as, addr, attrs, buf, len,
                                             addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}

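/*
 * Illustrative usage (the buffer size is assumed): all of these entry
 * points report transaction failures through MemTxResult rather than by
 * aborting, so callers can react to decode errors:
 *
 *     uint8_t buf[64];
 *     MemTxResult res = address_space_read(&address_space_memory, addr,
 *                                          MEMTXATTRS_UNSPECIFIED,
 *                                          buf, sizeof(buf));
 *     if (res != MEMTX_OK) {
 *         // the access hit device or unassigned memory and failed
 *     }
 */
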
MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                             uint8_t *buf, int len, bool is_write)
{
    if (is_write) {
        return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
    } else {
        return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
    }
}

void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}

enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            l = memory_access_size(mr, l, addr1);
        } else {
            /* ROM/RAM case */
            ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();
}

/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}

void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }

    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}

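/*
 * Illustrative sketch (address and image are assumed): a ROM loader
 * typically pushes its image through the _write_rom path so that
 * write-protected regions still accept the data, then flushes the host
 * icache for KVM-style accelerators:
 *
 *     cpu_physical_memory_write_rom(&address_space_memory,
 *                                   0x100000, image, image_size);
 *     cpu_flush_icache_range(0x100000, image_size);
 */
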
typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
    bool in_use;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    QEMUBH *bh;
    QLIST_ENTRY(MapClient) link;
} MapClient;

QemuMutex map_client_list_lock;
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

static void cpu_unregister_map_client_do(MapClient *client)
{
    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients_locked(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        qemu_bh_schedule(client->bh);
        cpu_unregister_map_client_do(client);
    }
}

void cpu_register_map_client(QEMUBH *bh)
{
    MapClient *client = g_malloc(sizeof(*client));

    qemu_mutex_lock(&map_client_list_lock);
    client->bh = bh;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    if (!atomic_read(&bounce.in_use)) {
        cpu_notify_map_clients_locked();
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

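/*
 * A sketch of the retry protocol (dma_retry_bh and s are hypothetical
 * names): a caller whose address_space_map() returned NULL because the
 * bounce buffer was busy registers a bottom half and retries when it
 * fires:
 *
 *     static void dma_retry_bh(void *opaque)
 *     {
 *         // try address_space_map() again here
 *     }
 *
 *     cpu_register_map_client(qemu_bh_new(dma_retry_bh, s));
 *
 * Scheduled clients are unregistered automatically by
 * cpu_notify_map_clients_locked() above.
 */
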
void cpu_exec_init_all(void)
{
    qemu_mutex_init(&ram_list.mutex);
    /* The data structures we set up here depend on knowing the page size,
     * so no more changes can be made after this point.
     * In an ideal world, nothing we did before we had finished the
     * machine setup would care about the target page size, and we could
     * do this much later, rather than requiring board models to state
     * up front what their requirements are.
     */
    finalize_target_page_bits();
    io_mem_init();
    memory_map_init();
    qemu_mutex_init(&map_client_list_lock);
}

void cpu_unregister_map_client(QEMUBH *bh)
{
    MapClient *client;

    qemu_mutex_lock(&map_client_list_lock);
    QLIST_FOREACH(client, &map_client_list, link) {
        if (client->bh == bh) {
            cpu_unregister_map_client_do(client);
            break;
        }
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

static void cpu_notify_map_clients(void)
{
    qemu_mutex_lock(&map_client_list_lock);
    cpu_notify_map_clients_locked();
    qemu_mutex_unlock(&map_client_list_lock);
}

bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                /* Leave the RCU critical section on the failure path too. */
                rcu_read_unlock();
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    rcu_read_unlock();
    return true;
}

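/*
 * Illustrative usage (desc_addr/desc_len are assumed): a device model can
 * validate a guest buffer before starting a transfer:
 *
 *     if (!address_space_access_valid(&address_space_memory,
 *                                     desc_addr, desc_len, false)) {
 *         // flag a descriptor error instead of issuing the read
 *     }
 */
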
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    void *ptr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &l, is_write);

    if (!memory_access_is_direct(mr, is_write)) {
        if (atomic_xchg(&bounce.in_use, true)) {
            rcu_read_unlock();
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                               bounce.buffer, l);
        }

        rcu_read_unlock();
        *plen = l;
        return bounce.buffer;
    }

    base = xlat;

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    *plen = done;
    ptr = qemu_ram_ptr_length(mr->ram_block, base, plen);
    rcu_read_unlock();

    return ptr;
}

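/*
 * Descriptive note on the fallback above: there is exactly one global
 * bounce buffer, claimed with atomic_xchg(&bounce.in_use, true), so
 * mapping a region that is not RAM-backed serializes across the whole
 * machine and is capped at TARGET_PAGE_SIZE per mapping.  This is why a
 * failed map should be retried from a map-client callback rather than in
 * a busy loop.
 */
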
/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = memory_region_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len,
                               is_write, access_len);
}

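/*
 * Illustrative sketch of the zero-copy pattern served by the pair above
 * (guest_addr and size are assumed): map, touch the host pointer, unmap
 * with the byte count actually transferred:
 *
 *     hwaddr plen = size;
 *     void *host = cpu_physical_memory_map(guest_addr, &plen, 1);
 *     if (host) {
 *         memset(host, 0, plen);     // stand-in for the device transfer
 *         cpu_physical_memory_unmap(host, plen, 1, plen);
 *     }
 *
 * If plen comes back smaller than requested, the caller is expected to
 * loop over the remainder.
 */
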
/* warning: addr must be aligned */
static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

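/*
 * A worked example of the endian variants (byte values assumed): if the
 * four bytes at addr are 0x12 0x34 0x56 0x78, then
 *
 *     ldl_le_phys(as, addr) == 0x78563412
 *     ldl_be_phys(as, addr) == 0x12345678
 *
 * and ldl_phys() returns whichever of the two the target's native byte
 * order selects.
 */
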
/* warning: addr must be aligned */
static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* XXX: optimize */
uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &val, 1, 0);
    if (result) {
        *result = r;
    }
    return val;
}

uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline uint32_t address_space_lduw_internal(AddressSpace *as,
                                                   hwaddr addr,
                                                   MemTxAttrs attrs,
                                                   MemTxResult *result,
                                                   enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_BIG_ENDIAN);
}

uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned. The RAM page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
                                MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    uint8_t dirty_log_mask;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        stl_p(ptr, val);

        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
                                            4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

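/*
 * Descriptive note (an inference from the mask handling above): the
 * _notdirty store clears DIRTY_MEMORY_CODE from the dirty mask, so
 * translated code covering the page survives the write.  Target code
 * uses this when setting accessed/dirty bits in guest page table
 * entries, where invalidating translations on every PTE update would be
 * needlessly expensive.
 */
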
bellard8df1cd02005-01-28 22:37:22 +00003415/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003416static inline void address_space_stl_internal(AddressSpace *as,
3417 hwaddr addr, uint32_t val,
3418 MemTxAttrs attrs,
3419 MemTxResult *result,
3420 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003421{
bellard8df1cd02005-01-28 22:37:22 +00003422 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003423 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003424 hwaddr l = 4;
3425 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003426 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003427 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003428
Paolo Bonzini41063e12015-03-18 14:21:43 +01003429 rcu_read_lock();
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003430 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003431 true);
3432 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003433 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003434
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003435#if defined(TARGET_WORDS_BIGENDIAN)
3436 if (endian == DEVICE_LITTLE_ENDIAN) {
3437 val = bswap32(val);
3438 }
3439#else
3440 if (endian == DEVICE_BIG_ENDIAN) {
3441 val = bswap32(val);
3442 }
3443#endif
Peter Maydell50013112015-04-26 16:49:24 +01003444 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003445 } else {
bellard8df1cd02005-01-28 22:37:22 +00003446 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003447 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003448 switch (endian) {
3449 case DEVICE_LITTLE_ENDIAN:
3450 stl_le_p(ptr, val);
3451 break;
3452 case DEVICE_BIG_ENDIAN:
3453 stl_be_p(ptr, val);
3454 break;
3455 default:
3456 stl_p(ptr, val);
3457 break;
3458 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003459 invalidate_and_set_dirty(mr, addr1, 4);
Peter Maydell50013112015-04-26 16:49:24 +01003460 r = MEMTX_OK;
bellard8df1cd02005-01-28 22:37:22 +00003461 }
Peter Maydell50013112015-04-26 16:49:24 +01003462 if (result) {
3463 *result = r;
3464 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003465 if (release_lock) {
3466 qemu_mutex_unlock_iothread();
3467 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003468 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003469}

void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}
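
/*
 * Usage sketch (illustrative, not part of the original file): a checked
 * little-endian 32-bit store that propagates a transaction failure back
 * to the caller.  Only the error-handling comment is invented;
 * MEMTXATTRS_UNSPECIFIED and MEMTX_OK are the real definitions used above.
 *
 *     MemTxResult r;
 *     address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, &r);
 *     if (r != MEMTX_OK) {
 *         // e.g. report a bus error to the guest
 *     }
 */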

void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* XXX: optimize */
void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t v = val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &v, 1, 1);
    if (result) {
        *result = r;
    }
}

void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be 2-byte aligned */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* XXX: optimize */
void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = tswap64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_le64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_be64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}
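
/*
 * Usage sketch (illustrative, not part of the original file): unlike the
 * stl/stw helpers above, the stq variants byte-swap in place and always go
 * through address_space_rw with no direct RAM fast path (hence the
 * XXX: optimize note).  A checked big-endian store; the error path is
 * invented.
 *
 *     MemTxResult r;
 *     address_space_stq_be(as, addr, val64, MEMTXATTRS_UNSPECIFIED, &r);
 *     if (r != MEMTX_OK) {
 *         // fall back or signal the failure to the caller
 *     }
 */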

void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        int asidx;
        MemTxAttrs attrs;

        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
        asidx = cpu_asidx_from_attrs(cpu, attrs);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
                                          phys_addr, buf, l);
        } else {
            address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
                             MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
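
/*
 * Usage sketch (illustrative, gdbstub-style, not part of the original
 * file): read four bytes of guest virtual memory for a debugger,
 * tolerating unmapped pages.  The helper name is hypothetical; ldl_p is
 * QEMU's existing 32-bit load-from-buffer helper.
 *
 *     static bool debug_read_u32_example(CPUState *cpu, target_ulong va,
 *                                        uint32_t *out)
 *     {
 *         uint8_t buf[4];
 *         if (cpu_memory_rw_debug(cpu, va, buf, sizeof(buf), 0) < 0) {
 *             return false; // no physical page mapped at va
 *         }
 *         *out = ldl_p(buf);
 *         return true;
 *     }
 */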

/*
 * Allows code that needs to deal with migration bitmaps etc. to still be
 * built target-independent.
 */
size_t qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}
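
/*
 * Illustrative use (sketch): a target-independent caller recovering the
 * guest page size without including target-specific headers.
 *
 *     size_t page_size = (size_t)1 << qemu_target_page_bits();
 */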

#endif

/*
 * A helper function for the _utterly broken_ virtio device model to find out
 * if it's running on a big-endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}
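
/*
 * Usage sketch (illustrative, not part of the original file): callers such
 * as memory-dump code can skip device-backed pages when walking
 * guest-physical ranges.  The loop body is invented.
 *
 *     if (cpu_physical_memory_is_io(phys_addr)) {
 *         continue; // MMIO page: nothing meaningful to copy out
 *     }
 */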

int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);
        if (ret) {
            break;
        }
    }
    rcu_read_unlock();
    return ret;
}
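
/*
 * Example callback (illustrative, not part of the original file): tally the
 * used length of every RAM block.  The signature matches RAMBlockIterFunc;
 * the helper and the accumulator are invented.
 *
 *     static int add_block_len_example(const char *idstr, void *host_addr,
 *                                      ram_addr_t offset, ram_addr_t length,
 *                                      void *opaque)
 *     {
 *         *(uint64_t *)opaque += length;
 *         return 0; // nonzero would stop the iteration early
 *     }
 *
 *     uint64_t total = 0;
 *     qemu_ram_foreach_block(add_block_len_example, &total);
 */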
#endif