/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#ifndef _WIN32
#endif

#include "qemu/cutils.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "hw/qdev-core.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#include "hw/xen/xen.h"
#endif
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "hw/hw.h"
#include "exec/memory.h"
#include "exec/ioport.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "exec/log.h"

#include "migration/vmstate.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

#ifdef TARGET_PAGE_BITS_VARY
int target_page_bits;
bool target_page_bits_decided;
#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

bool set_preferred_target_page_bits(int bits)
{
    /* The target page size is the lowest common denominator for all
     * the CPUs in the system, so we can only make it smaller, never
     * larger. And we can't make it smaller once we've committed to
     * a particular size.
     */
#ifdef TARGET_PAGE_BITS_VARY
    assert(bits >= TARGET_PAGE_BITS_MIN);
    if (target_page_bits == 0 || target_page_bits > bits) {
        if (target_page_bits_decided) {
            return false;
        }
        target_page_bits = bits;
    }
#endif
    return true;
}
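
/* Example (hypothetical caller, for illustration only): a target whose
 * CPUs support variable page sizes would call this once per CPU early
 * in realize, before any translation has happened, e.g.
 *
 *     if (!set_preferred_target_page_bits(12)) {   // 4 KiB pages
 *         // too late: another CPU already committed to a smaller size
 *     }
 *
 * The value 12 is illustrative; real targets pass their own minimum.
 */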

#if !defined(CONFIG_USER_ONLY)

static void finalize_target_page_bits(void)
{
#ifdef TARGET_PAGE_BITS_VARY
    if (target_page_bits == 0) {
        target_page_bits = TARGET_PAGE_BITS_MIN;
    }
    target_page_bits_decided = true;
#endif
}

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits to skip to the next level (in units of L2_SIZE).
     * 0 for a leaf. */
    uint32_t skip : 6;
     /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables. */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
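
/* Worked example: with ADDR_SPACE_BITS 64 and a 4 KiB target page
 * (TARGET_PAGE_BITS 12), P_L2_LEVELS = ((64 - 12 - 1) / 9) + 1 = 6.
 * The 52-bit physical page number is consumed nine bits at a time
 * through at most six levels of 512-entry nodes.
 */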

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    static unsigned alloc_hint = 16;
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, alloc_hint);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
        alloc_hint = map->nodes_nb_alloc;
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
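
/* Example: registering a run of 512 naturally aligned target pages
 * (one full bottom-level node) never recurses down to level 0: at
 * level 1 the test (*index & (step - 1)) == 0 && *nb >= step holds,
 * so the level-1 entry itself becomes a leaf (skip == 0) covering all
 * 512 pages with a single PhysPageEntry.
 */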

/* Compact a non-leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes);
    }
}
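
/* Example: when only a handful of pages are mapped, the interior nodes
 * on the path typically have a single valid child.  Each fold adds the
 * child's skip to the parent's, so one entry with skip == 3 replaces
 * three chained nodes and a lookup dereferences one node instead of
 * three.  The guard above refuses folds whose combined skip would
 * overflow the bits reserved for it.
 */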

static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return int128_gethi(section->size) ||
           range_covers_byte(section->offset_within_address_space,
                             int128_getlo(section->size), addr);
}

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}
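
/* Example lookup: in an uncompacted tree every entry has skip == 1, so
 * each iteration drops i by one and uses the next nine index bits,
 * (index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1), to pick a child.  A
 * compacted entry with skip == 2 jumps two levels at once without
 * testing the intervening index bits; the final section_covers_addr()
 * check is what catches addresses whose skipped bits would really have
 * led to an unassigned entry.
 */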

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = atomic_read(&d->mru_section);
    subpage_t *subpage;
    bool update;

    if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
        section_covers_addr(section, addr)) {
        update = false;
    } else {
        section = phys_page_find(d->phys_map, addr, d->map.nodes,
                                 d->map.sections);
        update = true;
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    if (update) {
        atomic_set(&d->mru_section, section);
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}
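
/* Typical caller pattern (a sketch modelled on address_space_rw;
 * length bookkeeping and error handling omitted):
 *
 *     rcu_read_lock();
 *     l = len;
 *     mr = address_space_translate(as, addr, &addr1, &l, is_write);
 *     if (memory_access_is_direct(mr, is_write)) {
 *         // RAM: access the host pointer directly
 *     } else {
 *         // MMIO: dispatch through the MemoryRegion callbacks
 *     }
 *     rcu_read_unlock();
 */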

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = atomic_rcu_read(&cpu->cpu_ases[asidx].memory_dispatch);

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}
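
/* Usage sketch (hypothetical target code): the target sets num_ases
 * before wiring up its address spaces, e.g.
 *
 *     cpu->num_ases = 1;
 *     cpu_address_space_init(cpu, as, 0);
 *
 * Index 0 doubles as the cpu->as convenience alias set above.
 */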

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
#endif

void cpu_exec_unrealizefn(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cpu_list_remove(cpu);

    if (cc->vmsd != NULL) {
        vmstate_unregister(NULL, cc->vmsd, cpu);
    }
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
    }
}

void cpu_exec_initfn(CPUState *cpu)
{
    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();

    /* This is a softmmu CPU object, so create a property for it
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
                             (Object **)&cpu->memory,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));
#endif
}

void cpu_exec_realizefn(CPUState *cpu, Error **errp)
{
    CPUClass *cc ATTRIBUTE_UNUSED = CPU_GET_CLASS(cpu);

    cpu_list_add(cpu);

#ifndef CONFIG_USER_ONLY
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
    }
#endif
}

#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    mmap_lock();
    tb_lock();
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
    tb_unlock();
    mmap_unlock();
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    MemTxAttrs attrs;
    hwaddr phys = cpu_get_phys_page_attrs_debug(cpu, pc, &attrs);
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    if (phys != -1) {
        /* Locks grabbed by tb_invalidate_phys_addr */
        tb_invalidate_phys_addr(cpu->cpu_ases[asidx].as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
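
/* Example: a 4-byte watchpoint at the very top of the address space,
 * vaddr == ~(vaddr)3, has wpend == ~(vaddr)0.  Computing vaddr + len
 * directly would wrap to zero and break the comparison, which is why
 * the test above compares inclusive end addresses on both sides.
 */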

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* Enable or disable single-step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction. */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log_lock();
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_unlock();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *     xxx removed from list
     *     rcu_read_lock()
     *     read mru_block
     *     mru_block = NULL;
     *     call_rcu(reclaim_ramblock, xxx);
     *     rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block.  */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    bool dirty = false;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
                                              offset, num);
        page += num;
    }

    rcu_read_unlock();

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}
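
/* Example of the split above: a range whose first page lands near the
 * end of one DirtyMemoryBlocks entry is handled in two steps; the
 * first iteration clears num = DIRTY_MEMORY_BLOCK_SIZE - offset bits,
 * the second continues at offset 0 of blocks->blocks[idx + 1].  No
 * single bitmap_test_and_clear_atomic() call ever crosses a block
 * boundary.
 */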

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
    qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this. Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    bool have_sub_page = mr->subpage;

    memory_region_unref(mr);

    if (have_sub_page) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}


static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001176 uint16_t section_index = phys_section_add(&d->map, section);
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001177 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1178 TARGET_PAGE_BITS));
Avi Kivitydd811242012-01-02 12:17:03 +02001179
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001180 assert(num_pages);
1181 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
bellard33417e72003-08-10 21:47:01 +00001182}
1183
Avi Kivityac1970f2012-10-03 16:22:53 +02001184static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001185{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001186 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini00752702013-05-29 12:13:54 +02001187 AddressSpaceDispatch *d = as->next_dispatch;
Paolo Bonzini99b9cc02013-05-27 13:18:01 +02001188 MemoryRegionSection now = *section, remain = *section;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001189 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001190
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001191 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1192 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1193 - now.offset_within_address_space;
1194
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001195 now.size = int128_min(int128_make64(left), now.size);
Avi Kivityac1970f2012-10-03 16:22:53 +02001196 register_subpage(d, &now);
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001197 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001198 now.size = int128_zero();
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001199 }
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001200 while (int128_ne(remain.size, now.size)) {
1201 remain.size = int128_sub(remain.size, now.size);
1202 remain.offset_within_address_space += int128_get64(now.size);
1203 remain.offset_within_region += int128_get64(now.size);
Tyler Hall69b67642012-07-25 18:45:04 -04001204 now = remain;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001205 if (int128_lt(remain.size, page_size)) {
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001206 register_subpage(d, &now);
Hu Tao88266242013-08-29 18:21:16 +08001207 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001208 now.size = page_size;
Avi Kivityac1970f2012-10-03 16:22:53 +02001209 register_subpage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001210 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001211 now.size = int128_and(now.size, int128_neg(page_size));
Avi Kivityac1970f2012-10-03 16:22:53 +02001212 register_multipage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001213 }
Avi Kivity0f0cb162012-02-13 17:14:32 +02001214 }
1215}
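/*
 * Worked example (illustrative, assuming 4 KiB target pages): a section at
 * offset 0x1800 with size 0x2900 covers [0x1800, 0x4100).  The loop above
 * splits it into a subpage head [0x1800, 0x2000), a page-aligned multipage
 * middle [0x2000, 0x4000), and a subpage tail [0x4000, 0x4100).
 */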
1216
Sheng Yang62a27442010-01-26 19:21:16 +08001217void qemu_flush_coalesced_mmio_buffer(void)
1218{
1219 if (kvm_enabled())
1220 kvm_flush_coalesced_mmio_buffer();
1221}
1222
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001223void qemu_mutex_lock_ramlist(void)
1224{
1225 qemu_mutex_lock(&ram_list.mutex);
1226}
1227
1228void qemu_mutex_unlock_ramlist(void)
1229{
1230 qemu_mutex_unlock(&ram_list.mutex);
1231}
1232
Markus Armbrustere1e84ba2013-07-31 15:11:10 +02001233#ifdef __linux__
Haozhong Zhangd6af99c2016-10-27 12:22:58 +08001234static int64_t get_file_size(int fd)
1235{
1236 int64_t size = lseek(fd, 0, SEEK_END);
1237 if (size < 0) {
1238 return -errno;
1239 }
1240 return size;
1241}
1242
Alex Williamson04b16652010-07-02 11:13:17 -06001243static void *file_ram_alloc(RAMBlock *block,
1244 ram_addr_t memory,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001245 const char *path,
1246 Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001247{
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001248 bool unlink_on_error = false;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001249 char *filename;
Peter Feiner8ca761f2013-03-04 13:54:25 -05001250 char *sanitized_name;
1251 char *c;
Igor Mammedov056b68a2016-07-20 11:54:03 +02001252 void *area = MAP_FAILED;
Paolo Bonzini5c3ece72016-03-17 15:53:13 +01001253 int fd = -1;
Haozhong Zhangd6af99c2016-10-27 12:22:58 +08001254 int64_t file_size;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001255
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001256 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1257 error_setg(errp,
1258 "host lacks kvm mmu notifiers, -mem-path unsupported");
1259 return NULL;
1260 }
1261
1262 for (;;) {
1263 fd = open(path, O_RDWR);
1264 if (fd >= 0) {
1265 /* @path names an existing file, use it */
1266 break;
1267 }
1268 if (errno == ENOENT) {
1269 /* @path names a file that doesn't exist, create it */
1270 fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
1271 if (fd >= 0) {
1272 unlink_on_error = true;
1273 break;
1274 }
1275 } else if (errno == EISDIR) {
1276 /* @path names a directory, create a file there */
1277 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1278 sanitized_name = g_strdup(memory_region_name(block->mr));
1279 for (c = sanitized_name; *c != '\0'; c++) {
1280 if (*c == '/') {
1281 *c = '_';
1282 }
1283 }
1284
1285 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1286 sanitized_name);
1287 g_free(sanitized_name);
1288
1289 fd = mkstemp(filename);
1290 if (fd >= 0) {
1291 unlink(filename);
1292 g_free(filename);
1293 break;
1294 }
1295 g_free(filename);
1296 }
1297 if (errno != EEXIST && errno != EINTR) {
1298 error_setg_errno(errp, errno,
1299 "can't open backing store %s for guest RAM",
1300 path);
1301 goto error;
1302 }
1303 /*
1304 * Try again on EINTR and EEXIST. The latter happens when
1305 * something else creates the file between our two open() calls.
1306 */
1307 }
1308
Dr. David Alan Gilbert863e9622016-09-29 20:09:37 +01001309 block->page_size = qemu_fd_getpagesize(fd);
Haozhong Zhang83606682016-10-24 20:49:37 +08001310 block->mr->align = block->page_size;
1311#if defined(__s390x__)
1312 if (kvm_enabled()) {
1313 block->mr->align = MAX(block->mr->align, QEMU_VMALLOC_ALIGN);
1314 }
1315#endif
Marcelo Tosattic9027602010-03-01 20:25:08 -03001316
Haozhong Zhangd6af99c2016-10-27 12:22:58 +08001317 file_size = get_file_size(fd);
1318
Dr. David Alan Gilbert863e9622016-09-29 20:09:37 +01001319 if (memory < block->page_size) {
Hu Tao557529d2014-09-09 13:28:00 +08001320 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
Dr. David Alan Gilbert863e9622016-09-29 20:09:37 +01001321 "or larger than page size 0x%zx",
1322 memory, block->page_size);
Hu Tao557529d2014-09-09 13:28:00 +08001323 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001324 }
1325
Haozhong Zhang1775f112016-11-02 09:05:51 +08001326 if (file_size > 0 && file_size < memory) {
1327 error_setg(errp, "backing store %s size 0x%" PRIx64
1328 " does not match 'size' option 0x" RAM_ADDR_FMT,
1329 path, file_size, memory);
1330 goto error;
1331 }
1332
Dr. David Alan Gilbert863e9622016-09-29 20:09:37 +01001333 memory = ROUND_UP(memory, block->page_size);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001334
1335 /*
1336 * ftruncate is not supported by hugetlbfs in older
1337 * hosts, so don't bother bailing out on errors.
1338 * If anything goes wrong with it under other filesystems,
1339 * mmap will fail.
Haozhong Zhangd6af99c2016-10-27 12:22:58 +08001340 *
1341 * Do not truncate the non-empty backend file to avoid corrupting
1342 * the existing data in the file. Disabling shrinking is not
1343 * enough. For example, the current vNVDIMM implementation stores
1344 * the guest NVDIMM labels at the end of the backend file. If the
1345 * backend file is later extended, QEMU will not be able to find
1346 * those labels. Therefore, extending the non-empty backend file
1347 * is disabled as well.
Marcelo Tosattic9027602010-03-01 20:25:08 -03001348 */
Haozhong Zhangd6af99c2016-10-27 12:22:58 +08001349 if (!file_size && ftruncate(fd, memory)) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001350 perror("ftruncate");
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001351 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001352
Dominik Dingeld2f39ad2016-04-25 13:55:38 +02001353 area = qemu_ram_mmap(fd, memory, block->mr->align,
1354 block->flags & RAM_SHARED);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001355 if (area == MAP_FAILED) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001356 error_setg_errno(errp, errno,
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001357 "unable to map backing store for guest RAM");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001358 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001359 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001360
1361 if (mem_prealloc) {
Igor Mammedov056b68a2016-07-20 11:54:03 +02001362 os_mem_prealloc(fd, area, memory, errp);
1363 if (errp && *errp) {
1364 goto error;
1365 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001366 }
1367
Alex Williamson04b16652010-07-02 11:13:17 -06001368 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001369 return area;
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001370
1371error:
Igor Mammedov056b68a2016-07-20 11:54:03 +02001372 if (area != MAP_FAILED) {
1373 qemu_ram_munmap(area, memory);
1374 }
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001375 if (unlink_on_error) {
1376 unlink(path);
1377 }
Paolo Bonzini5c3ece72016-03-17 15:53:13 +01001378 if (fd != -1) {
1379 close(fd);
1380 }
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001381 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001382}
1383#endif
1384
Mike Day0dc3f442013-09-05 14:41:35 -04001385/* Called with the ramlist lock held. */
Alex Williamsond17b5282010-06-25 11:08:38 -06001386static ram_addr_t find_ram_offset(ram_addr_t size)
1387{
Alex Williamson04b16652010-07-02 11:13:17 -06001388 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001389 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001390
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001391 assert(size != 0); /* it would hand out the same offset multiple times */
1392
Mike Day0dc3f442013-09-05 14:41:35 -04001393 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
Alex Williamson04b16652010-07-02 11:13:17 -06001394 return 0;
Mike Day0d53d9f2015-01-21 13:45:24 +01001395 }
Alex Williamson04b16652010-07-02 11:13:17 -06001396
Mike Day0dc3f442013-09-05 14:41:35 -04001397 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001398 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001399
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001400 end = block->offset + block->max_length;
Alex Williamson04b16652010-07-02 11:13:17 -06001401
Mike Day0dc3f442013-09-05 14:41:35 -04001402 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001403 if (next_block->offset >= end) {
1404 next = MIN(next, next_block->offset);
1405 }
1406 }
1407 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001408 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001409 mingap = next - end;
1410 }
1411 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001412
1413 if (offset == RAM_ADDR_MAX) {
1414 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1415 (uint64_t)size);
1416 abort();
1417 }
1418
Alex Williamson04b16652010-07-02 11:13:17 -06001419 return offset;
1420}
1421
Juan Quintela652d7ec2012-07-20 10:37:54 +02001422ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001423{
Alex Williamsond17b5282010-06-25 11:08:38 -06001424 RAMBlock *block;
1425 ram_addr_t last = 0;
1426
Mike Day0dc3f442013-09-05 14:41:35 -04001427 rcu_read_lock();
1428 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001429 last = MAX(last, block->offset + block->max_length);
Mike Day0d53d9f2015-01-21 13:45:24 +01001430 }
Mike Day0dc3f442013-09-05 14:41:35 -04001431 rcu_read_unlock();
Alex Williamsond17b5282010-06-25 11:08:38 -06001432 return last;
1433}
1434
Jason Baronddb97f12012-08-02 15:44:16 -04001435static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1436{
1437 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001438
1439 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
Marcel Apfelbaum47c8ca52015-02-04 17:43:54 +02001440 if (!machine_dump_guest_core(current_machine)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001441 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1442 if (ret) {
1443 perror("qemu_madvise");
1444 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1445 "but dump_guest_core=off specified\n");
1446 }
1447 }
1448}
1449
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001450const char *qemu_ram_get_idstr(RAMBlock *rb)
1451{
1452 return rb->idstr;
1453}
1454
Mike Dayae3a7042013-09-05 14:41:35 -04001455/* Called with iothread lock held. */
Gongleifa53a0e2016-05-10 10:04:59 +08001456void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
Hu Tao20cfe882014-04-02 15:13:26 +08001457{
Gongleifa53a0e2016-05-10 10:04:59 +08001458 RAMBlock *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001459
Avi Kivityc5705a72011-12-20 15:59:12 +02001460 assert(new_block);
1461 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001462
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001463 if (dev) {
1464 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001465 if (id) {
1466 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001467 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001468 }
1469 }
1470 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1471
Gongleiab0a9952016-05-10 10:05:00 +08001472 rcu_read_lock();
Mike Day0dc3f442013-09-05 14:41:35 -04001473 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Gongleifa53a0e2016-05-10 10:04:59 +08001474 if (block != new_block &&
1475 !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001476 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1477 new_block->idstr);
1478 abort();
1479 }
1480 }
Mike Day0dc3f442013-09-05 14:41:35 -04001481 rcu_read_unlock();
Avi Kivityc5705a72011-12-20 15:59:12 +02001482}
1483
Mike Dayae3a7042013-09-05 14:41:35 -04001484/* Called with iothread lock held. */
Gongleifa53a0e2016-05-10 10:04:59 +08001485void qemu_ram_unset_idstr(RAMBlock *block)
Hu Tao20cfe882014-04-02 15:13:26 +08001486{
Mike Dayae3a7042013-09-05 14:41:35 -04001487 /* FIXME: arch_init.c assumes that this is not called throughout
1488 * migration. Ignore the problem since hot-unplug during migration
1489 * does not work anyway.
1490 */
Hu Tao20cfe882014-04-02 15:13:26 +08001491 if (block) {
1492 memset(block->idstr, 0, sizeof(block->idstr));
1493 }
1494}
1495
Dr. David Alan Gilbert863e9622016-09-29 20:09:37 +01001496size_t qemu_ram_pagesize(RAMBlock *rb)
1497{
1498 return rb->page_size;
1499}
1500
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001501static int memory_try_enable_merging(void *addr, size_t len)
1502{
Marcel Apfelbaum75cc7f02015-02-04 17:43:55 +02001503 if (!machine_mem_merge(current_machine)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001504 /* disabled by the user */
1505 return 0;
1506 }
1507
1508 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1509}
1510
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001511/* Only legal before guest might have detected the memory size: e.g. on
1512 * incoming migration, or right after reset.
1513 *
1514 * As the memory core doesn't know how memory is accessed, it is up to
1515 * the resize callback to update device state and/or add assertions to detect
1516 * misuse, if necessary.
1517 */
Gongleifa53a0e2016-05-10 10:04:59 +08001518int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001519{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001520 assert(block);
1521
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001522 newsize = HOST_PAGE_ALIGN(newsize);
Michael S. Tsirkin129ddaf2015-02-17 10:15:30 +01001523
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001524 if (block->used_length == newsize) {
1525 return 0;
1526 }
1527
1528 if (!(block->flags & RAM_RESIZEABLE)) {
1529 error_setg_errno(errp, EINVAL,
1530 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1531 " in != 0x" RAM_ADDR_FMT, block->idstr,
1532 newsize, block->used_length);
1533 return -EINVAL;
1534 }
1535
1536 if (block->max_length < newsize) {
1537 error_setg_errno(errp, EINVAL,
1538 "Length too large: %s: 0x" RAM_ADDR_FMT
1539 " > 0x" RAM_ADDR_FMT, block->idstr,
1540 newsize, block->max_length);
1541 return -EINVAL;
1542 }
1543
1544 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1545 block->used_length = newsize;
Paolo Bonzini58d27072015-03-23 11:56:01 +01001546 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1547 DIRTY_CLIENTS_ALL);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001548 memory_region_set_size(block->mr, newsize);
1549 if (block->resized) {
1550 block->resized(block->idstr, newsize, block->host);
1551 }
1552 return 0;
1553}
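/*
 * Usage sketch (illustrative): how a caller holding a resizeable block
 * might resize it, e.g. on incoming migration.  The helper name and its
 * arguments are hypothetical.
 */
#if 0
static void resize_ram_block(RAMBlock *block, ram_addr_t new_size)
{
    Error *err = NULL;

    if (qemu_ram_resize(block, new_size, &err) < 0) {
        error_report_err(err);
    }
}
#endif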
1554
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001555/* Called with ram_list.mutex held */
1556static void dirty_memory_extend(ram_addr_t old_ram_size,
1557 ram_addr_t new_ram_size)
1558{
1559 ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
1560 DIRTY_MEMORY_BLOCK_SIZE);
1561 ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
1562 DIRTY_MEMORY_BLOCK_SIZE);
1563 int i;
1564
1565 /* Only need to extend if block count increased */
1566 if (new_num_blocks <= old_num_blocks) {
1567 return;
1568 }
1569
1570 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1571 DirtyMemoryBlocks *old_blocks;
1572 DirtyMemoryBlocks *new_blocks;
1573 int j;
1574
1575 old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
1576 new_blocks = g_malloc(sizeof(*new_blocks) +
1577 sizeof(new_blocks->blocks[0]) * new_num_blocks);
1578
1579 if (old_num_blocks) {
1580 memcpy(new_blocks->blocks, old_blocks->blocks,
1581 old_num_blocks * sizeof(old_blocks->blocks[0]));
1582 }
1583
1584 for (j = old_num_blocks; j < new_num_blocks; j++) {
1585 new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
1586 }
1587
1588 atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
1589
1590 if (old_blocks) {
1591 g_free_rcu(old_blocks, rcu);
1592 }
1593 }
1594}
1595
Fam Zheng528f46a2016-03-01 14:18:18 +08001596static void ram_block_add(RAMBlock *new_block, Error **errp)
Avi Kivityc5705a72011-12-20 15:59:12 +02001597{
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001598 RAMBlock *block;
Mike Day0d53d9f2015-01-21 13:45:24 +01001599 RAMBlock *last_block = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001600 ram_addr_t old_ram_size, new_ram_size;
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001601 Error *err = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001602
1603 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001604
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001605 qemu_mutex_lock_ramlist();
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001606 new_block->offset = find_ram_offset(new_block->max_length);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001607
1608 if (!new_block->host) {
1609 if (xen_enabled()) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001610 xen_ram_alloc(new_block->offset, new_block->max_length,
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001611 new_block->mr, &err);
1612 if (err) {
1613 error_propagate(errp, err);
1614 qemu_mutex_unlock_ramlist();
Paolo Bonzini39c350e2016-03-09 18:14:01 +01001615 return;
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001616 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001617 } else {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001618 new_block->host = phys_mem_alloc(new_block->max_length,
Igor Mammedova2b257d2014-10-31 16:38:37 +00001619 &new_block->mr->align);
Markus Armbruster39228252013-07-31 15:11:11 +02001620 if (!new_block->host) {
Hu Taoef701d72014-09-09 13:27:54 +08001621 error_setg_errno(errp, errno,
1622 "cannot set up guest memory '%s'",
1623 memory_region_name(new_block->mr));
1624 qemu_mutex_unlock_ramlist();
Paolo Bonzini39c350e2016-03-09 18:14:01 +01001625 return;
Markus Armbruster39228252013-07-31 15:11:11 +02001626 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001627 memory_try_enable_merging(new_block->host, new_block->max_length);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001628 }
1629 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001630
Li Zhijiandd631692015-07-02 20:18:06 +08001631 new_ram_size = MAX(old_ram_size,
1632 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1633 if (new_ram_size > old_ram_size) {
1634 migration_bitmap_extend(old_ram_size, new_ram_size);
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001635 dirty_memory_extend(old_ram_size, new_ram_size);
Li Zhijiandd631692015-07-02 20:18:06 +08001636 }
Mike Day0d53d9f2015-01-21 13:45:24 +01001637 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1638 * QLIST (which has an RCU-friendly variant) does not have insertion at
1639 * tail, so save the last element in last_block.
1640 */
Mike Day0dc3f442013-09-05 14:41:35 -04001641 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Mike Day0d53d9f2015-01-21 13:45:24 +01001642 last_block = block;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001643 if (block->max_length < new_block->max_length) {
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001644 break;
1645 }
1646 }
1647 if (block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001648 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001649 } else if (last_block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001650 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001651 } else { /* list is empty */
Mike Day0dc3f442013-09-05 14:41:35 -04001652 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001653 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001654 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001655
Mike Day0dc3f442013-09-05 14:41:35 -04001656 /* Write list before version */
1657 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001658 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001659 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001660
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001661 cpu_physical_memory_set_dirty_range(new_block->offset,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001662 new_block->used_length,
1663 DIRTY_CLIENTS_ALL);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001664
Paolo Bonzinia904c912015-01-21 16:18:35 +01001665 if (new_block->host) {
1666 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1667 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
Cao jinc2cd6272016-09-12 14:34:56 +08001668 /* MADV_DONTFORK is also needed by KVM in the absence of a synchronous MMU */
Paolo Bonzinia904c912015-01-21 16:18:35 +01001669 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001670 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001671}
1672
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001673#ifdef __linux__
Fam Zheng528f46a2016-03-01 14:18:18 +08001674RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
1675 bool share, const char *mem_path,
1676 Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001677{
1678 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001679 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001680
1681 if (xen_enabled()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001682 error_setg(errp, "-mem-path not supported with Xen");
Fam Zheng528f46a2016-03-01 14:18:18 +08001683 return NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001684 }
1685
1686 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1687 /*
1688 * file_ram_alloc() needs to allocate just like
1689 * phys_mem_alloc, but we haven't bothered to provide
1690 * a hook there.
1691 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001692 error_setg(errp,
1693 "-mem-path not supported with this accelerator");
Fam Zheng528f46a2016-03-01 14:18:18 +08001694 return NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001695 }
1696
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001697 size = HOST_PAGE_ALIGN(size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001698 new_block = g_malloc0(sizeof(*new_block));
1699 new_block->mr = mr;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001700 new_block->used_length = size;
1701 new_block->max_length = size;
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001702 new_block->flags = share ? RAM_SHARED : 0;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001703 new_block->host = file_ram_alloc(new_block, size,
1704 mem_path, errp);
1705 if (!new_block->host) {
1706 g_free(new_block);
Fam Zheng528f46a2016-03-01 14:18:18 +08001707 return NULL;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001708 }
1709
Fam Zheng528f46a2016-03-01 14:18:18 +08001710 ram_block_add(new_block, &local_err);
Hu Taoef701d72014-09-09 13:27:54 +08001711 if (local_err) {
1712 g_free(new_block);
1713 error_propagate(errp, local_err);
Fam Zheng528f46a2016-03-01 14:18:18 +08001714 return NULL;
Hu Taoef701d72014-09-09 13:27:54 +08001715 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001716 return new_block;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001717}
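/*
 * Usage sketch (illustrative): roughly how a file-backed memory backend
 * could obtain its block.  The path here is hypothetical; on failure the
 * function returns NULL and sets *errp.
 */
#if 0
RAMBlock *rb = qemu_ram_alloc_from_file(size, mr, true,
                                        "/dev/hugepages/guest", errp);
if (!rb) {
    return;
}
#endif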
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001718#endif
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001719
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001720static
Fam Zheng528f46a2016-03-01 14:18:18 +08001721RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1722 void (*resized)(const char*,
1723 uint64_t length,
1724 void *host),
1725 void *host, bool resizeable,
1726 MemoryRegion *mr, Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001727{
1728 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001729 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001730
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001731 size = HOST_PAGE_ALIGN(size);
1732 max_size = HOST_PAGE_ALIGN(max_size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001733 new_block = g_malloc0(sizeof(*new_block));
1734 new_block->mr = mr;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001735 new_block->resized = resized;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001736 new_block->used_length = size;
1737 new_block->max_length = max_size;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001738 assert(max_size >= size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001739 new_block->fd = -1;
Dr. David Alan Gilbert863e9622016-09-29 20:09:37 +01001740 new_block->page_size = getpagesize();
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001741 new_block->host = host;
1742 if (host) {
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001743 new_block->flags |= RAM_PREALLOC;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001744 }
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001745 if (resizeable) {
1746 new_block->flags |= RAM_RESIZEABLE;
1747 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001748 ram_block_add(new_block, &local_err);
Hu Taoef701d72014-09-09 13:27:54 +08001749 if (local_err) {
1750 g_free(new_block);
1751 error_propagate(errp, local_err);
Fam Zheng528f46a2016-03-01 14:18:18 +08001752 return NULL;
Hu Taoef701d72014-09-09 13:27:54 +08001753 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001754 return new_block;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001755}
1756
Fam Zheng528f46a2016-03-01 14:18:18 +08001757RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001758 MemoryRegion *mr, Error **errp)
1759{
1760 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1761}
1762
Fam Zheng528f46a2016-03-01 14:18:18 +08001763RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
pbrook94a6b542009-04-11 17:15:54 +00001764{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001765 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1766}
1767
Fam Zheng528f46a2016-03-01 14:18:18 +08001768RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001769 void (*resized)(const char*,
1770 uint64_t length,
1771 void *host),
1772 MemoryRegion *mr, Error **errp)
1773{
1774 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
pbrook94a6b542009-04-11 17:15:54 +00001775}
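/*
 * Usage sketch (illustrative): allocating a resizeable block whose new
 * size is reported back through a callback on every successful resize.
 * The function names are hypothetical.
 */
#if 0
static void my_ram_resized(const char *id, uint64_t length, void *host)
{
    /* e.g. update a device register that advertises the current size */
}

static RAMBlock *my_alloc(ram_addr_t size, ram_addr_t maxsz,
                          MemoryRegion *mr, Error **errp)
{
    return qemu_ram_alloc_resizeable(size, maxsz, my_ram_resized, mr, errp);
}
#endif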
bellarde9a1ab12007-02-08 23:08:38 +00001776
Paolo Bonzini43771532013-09-09 17:58:40 +02001777static void reclaim_ramblock(RAMBlock *block)
1778{
1779 if (block->flags & RAM_PREALLOC) {
1780 ;
1781 } else if (xen_enabled()) {
1782 xen_invalidate_map_cache_entry(block->host);
1783#ifndef _WIN32
1784 } else if (block->fd >= 0) {
Eduardo Habkost2f3a2bb2015-11-06 20:11:21 -02001785 qemu_ram_munmap(block->host, block->max_length);
Paolo Bonzini43771532013-09-09 17:58:40 +02001786 close(block->fd);
1787#endif
1788 } else {
1789 qemu_anon_ram_free(block->host, block->max_length);
1790 }
1791 g_free(block);
1792}
1793
Fam Zhengf1060c52016-03-01 14:18:22 +08001794void qemu_ram_free(RAMBlock *block)
bellarde9a1ab12007-02-08 23:08:38 +00001795{
Marc-André Lureau85bc2a12016-03-29 13:20:51 +02001796 if (!block) {
1797 return;
1798 }
1799
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001800 qemu_mutex_lock_ramlist();
Fam Zhengf1060c52016-03-01 14:18:22 +08001801 QLIST_REMOVE_RCU(block, next);
1802 ram_list.mru_block = NULL;
1803 /* Write list before version */
1804 smp_wmb();
1805 ram_list.version++;
1806 call_rcu(block, reclaim_ramblock, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001807 qemu_mutex_unlock_ramlist();
bellarde9a1ab12007-02-08 23:08:38 +00001808}
1809
Huang Yingcd19cfa2011-03-02 08:56:19 +01001810#ifndef _WIN32
1811void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1812{
1813 RAMBlock *block;
1814 ram_addr_t offset;
1815 int flags;
1816 void *area, *vaddr;
1817
Mike Day0dc3f442013-09-05 14:41:35 -04001818 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001819 offset = addr - block->offset;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001820 if (offset < block->max_length) {
Michael S. Tsirkin1240be22014-11-12 11:44:41 +02001821 vaddr = ramblock_ptr(block, offset);
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001822 if (block->flags & RAM_PREALLOC) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001823 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001824 } else if (xen_enabled()) {
1825 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001826 } else {
1827 flags = MAP_FIXED;
Markus Armbruster3435f392013-07-31 15:11:07 +02001828 if (block->fd >= 0) {
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001829 flags |= (block->flags & RAM_SHARED ?
1830 MAP_SHARED : MAP_PRIVATE);
Markus Armbruster3435f392013-07-31 15:11:07 +02001831 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1832 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001833 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001834 /*
1835 * Remap needs to match alloc. Accelerators that
1836 * set phys_mem_alloc never remap. If they did,
1837 * we'd need a remap hook here.
1838 */
1839 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1840
Huang Yingcd19cfa2011-03-02 08:56:19 +01001841 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1842 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1843 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001844 }
1845 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001846 fprintf(stderr, "Could not remap addr: "
1847 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001848 length, addr);
1849 exit(1);
1850 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001851 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001852 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001853 }
Huang Yingcd19cfa2011-03-02 08:56:19 +01001854 }
1855 }
1856}
1857#endif /* !_WIN32 */
1858
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001859/* Return a host pointer to ram allocated with qemu_ram_alloc.
Mike Dayae3a7042013-09-05 14:41:35 -04001860 * This should not be used for general purpose DMA. Use address_space_map
1861 * or address_space_rw instead. For local memory (e.g. video ram) that the
1862 * device owns, use memory_region_get_ram_ptr.
Mike Day0dc3f442013-09-05 14:41:35 -04001863 *
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001864 * Called within RCU critical section.
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001865 */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001866void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001867{
Gonglei3655cb92016-02-20 10:35:20 +08001868 RAMBlock *block = ram_block;
1869
1870 if (block == NULL) {
1871 block = qemu_get_ram_block(addr);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001872 addr -= block->offset;
Gonglei3655cb92016-02-20 10:35:20 +08001873 }
Mike Dayae3a7042013-09-05 14:41:35 -04001874
1875 if (xen_enabled() && block->host == NULL) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001876 /* We need to check if the requested address is in the RAM
1877 * because we don't want to map the entire memory in QEMU.
1878 * In that case just map until the end of the page.
1879 */
1880 if (block->offset == 0) {
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001881 return xen_map_cache(addr, 0, 0);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001882 }
Mike Dayae3a7042013-09-05 14:41:35 -04001883
1884 block->host = xen_map_cache(block->offset, block->max_length, 1);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001885 }
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001886 return ramblock_ptr(block, addr);
pbrookdc828ca2009-04-09 22:21:07 +00001887}
1888
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001889/* Return a host pointer to guest's ram. Similar to qemu_map_ram_ptr
Mike Dayae3a7042013-09-05 14:41:35 -04001890 * but takes a size argument.
Mike Day0dc3f442013-09-05 14:41:35 -04001891 *
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001892 * Called within RCU critical section.
Mike Dayae3a7042013-09-05 14:41:35 -04001893 */
Gonglei3655cb92016-02-20 10:35:20 +08001894static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
1895 hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001896{
Gonglei3655cb92016-02-20 10:35:20 +08001897 RAMBlock *block = ram_block;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001898 if (*size == 0) {
1899 return NULL;
1900 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001901
Gonglei3655cb92016-02-20 10:35:20 +08001902 if (block == NULL) {
1903 block = qemu_get_ram_block(addr);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001904 addr -= block->offset;
Gonglei3655cb92016-02-20 10:35:20 +08001905 }
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001906 *size = MIN(*size, block->max_length - addr);
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001907
1908 if (xen_enabled() && block->host == NULL) {
1909 /* We need to check if the requested address is in the RAM
1910 * because we don't want to map the entire memory in QEMU.
1911 * In that case just map the requested area.
1912 */
1913 if (block->offset == 0) {
1914 return xen_map_cache(addr, *size, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001915 }
1916
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001917 block->host = xen_map_cache(block->offset, block->max_length, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001918 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001919
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001920 return ramblock_ptr(block, addr);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001921}
1922
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001923/*
1924 * Translates a host ptr back to a RAMBlock and an offset
1925 * within that RAMBlock.
1926 *
1927 * ptr: Host pointer to look up
1928 * round_offset: If true round the result offset down to a page boundary
1930 * *offset: set to result offset within the RAMBlock
1931 *
1932 * Returns: RAMBlock (or NULL if not found)
Mike Dayae3a7042013-09-05 14:41:35 -04001933 *
1934 * By the time this function returns, the returned pointer is not protected
1935 * by RCU anymore. If the caller is not within an RCU critical section and
1936 * does not hold the iothread lock, it must have other means of protecting the
1937 * pointer, such as a reference to the region that includes the incoming
1938 * ram_addr_t.
1939 */
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001940RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001941 ram_addr_t *offset)
pbrook5579c7f2009-04-11 14:47:08 +00001942{
pbrook94a6b542009-04-11 17:15:54 +00001943 RAMBlock *block;
1944 uint8_t *host = ptr;
1945
Jan Kiszka868bb332011-06-21 22:59:09 +02001946 if (xen_enabled()) {
Paolo Bonzinif615f392016-05-26 10:07:50 +02001947 ram_addr_t ram_addr;
Mike Day0dc3f442013-09-05 14:41:35 -04001948 rcu_read_lock();
Paolo Bonzinif615f392016-05-26 10:07:50 +02001949 ram_addr = xen_ram_addr_from_mapcache(ptr);
1950 block = qemu_get_ram_block(ram_addr);
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001951 if (block) {
Anthony PERARDd6b6aec2016-06-09 16:56:17 +01001952 *offset = ram_addr - block->offset;
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001953 }
Mike Day0dc3f442013-09-05 14:41:35 -04001954 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001955 return block;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001956 }
1957
Mike Day0dc3f442013-09-05 14:41:35 -04001958 rcu_read_lock();
1959 block = atomic_rcu_read(&ram_list.mru_block);
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001960 if (block && block->host && host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001961 goto found;
1962 }
1963
Mike Day0dc3f442013-09-05 14:41:35 -04001964 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001965 /* This case happens when the block is not mapped. */
1966 if (block->host == NULL) {
1967 continue;
1968 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001969 if (host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001970 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001971 }
pbrook94a6b542009-04-11 17:15:54 +00001972 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001973
Mike Day0dc3f442013-09-05 14:41:35 -04001974 rcu_read_unlock();
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001975 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001976
1977found:
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001978 *offset = (host - block->host);
1979 if (round_offset) {
1980 *offset &= TARGET_PAGE_MASK;
1981 }
Mike Day0dc3f442013-09-05 14:41:35 -04001982 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001983 return block;
1984}
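/*
 * Usage sketch (illustrative): mapping a bare host pointer back to its
 * block, as migration code does when it only has the host address.
 */
#if 0
ram_addr_t offset;
RAMBlock *rb = qemu_ram_block_from_host(host_ptr, true, &offset);
if (rb) {
    /* offset is page aligned within rb because round_offset was true */
}
#endif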
1985
Dr. David Alan Gilberte3dd7492015-11-05 18:10:33 +00001986/*
1987 * Finds the named RAMBlock
1988 *
1989 * name: The name of RAMBlock to find
1990 *
1991 * Returns: RAMBlock (or NULL if not found)
1992 */
1993RAMBlock *qemu_ram_block_by_name(const char *name)
1994{
1995 RAMBlock *block;
1996
1997 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1998 if (!strcmp(name, block->idstr)) {
1999 return block;
2000 }
2001 }
2002
2003 return NULL;
2004}
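/*
 * Usage sketch (illustrative): looking up the block behind a named memory
 * region; the name "pc.ram" is just an example.  The caller must be in an
 * RCU critical section or hold the iothread lock, since the block list is
 * walked with QLIST_FOREACH_RCU.
 */
#if 0
RAMBlock *rb = qemu_ram_block_by_name("pc.ram");
if (!rb) {
    error_report("block %s not found", "pc.ram");
}
#endif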
2005
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00002006/* Some of the softmmu routines need to translate from a host pointer
2007 (typically a TLB entry) back to a ram offset. */
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01002008ram_addr_t qemu_ram_addr_from_host(void *ptr)
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00002009{
2010 RAMBlock *block;
Paolo Bonzinif615f392016-05-26 10:07:50 +02002011 ram_addr_t offset;
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00002012
Paolo Bonzinif615f392016-05-26 10:07:50 +02002013 block = qemu_ram_block_from_host(ptr, false, &offset);
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00002014 if (!block) {
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01002015 return RAM_ADDR_INVALID;
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00002016 }
2017
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01002018 return block->offset + offset;
Marcelo Tosattie8902612010-10-11 15:31:19 -03002019}
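/*
 * Usage sketch (illustrative): callers are expected to check for
 * RAM_ADDR_INVALID, which means the pointer is not in any RAMBlock.
 */
#if 0
ram_addr_t ram_addr = qemu_ram_addr_from_host(ptr);
if (ram_addr == RAM_ADDR_INVALID) {
    error_report("Bad ram pointer %p", ptr);
    abort();
}
#endif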
Alex Williamsonf471a172010-06-11 11:11:42 -06002020
Paolo Bonzini49b24af2015-12-16 10:30:47 +01002021/* Called within RCU critical section. */
Avi Kivitya8170e52012-10-23 12:30:10 +02002022static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002023 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00002024{
Alex Bennéeba051fb2016-10-27 16:10:16 +01002025 bool locked = false;
2026
Juan Quintela52159192013-10-08 12:44:04 +02002027 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Alex Bennéeba051fb2016-10-27 16:10:16 +01002028 locked = true;
2029 tb_lock();
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002030 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00002031 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002032 switch (size) {
2033 case 1:
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002034 stb_p(qemu_map_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002035 break;
2036 case 2:
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002037 stw_p(qemu_map_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002038 break;
2039 case 4:
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002040 stl_p(qemu_map_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002041 break;
2042 default:
2043 abort();
2044 }
Alex Bennéeba051fb2016-10-27 16:10:16 +01002045
2046 if (locked) {
2047 tb_unlock();
2048 }
2049
Paolo Bonzini58d27072015-03-23 11:56:01 +01002050 /* Set both VGA and migration bits for simplicity and to remove
2051 * the notdirty callback faster.
2052 */
2053 cpu_physical_memory_set_dirty_range(ram_addr, size,
2054 DIRTY_CLIENTS_NOCODE);
bellardf23db162005-08-21 19:12:28 +00002055 /* we remove the notdirty callback only if the code has been
2056 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02002057 if (!cpu_physical_memory_is_clean(ram_addr)) {
Peter Crosthwaitebcae01e2015-09-10 22:39:42 -07002058 tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
Andreas Färber4917cf42013-05-27 05:17:50 +02002059 }
bellard1ccde1c2004-02-06 19:46:14 +00002060}
2061
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002062static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
2063 unsigned size, bool is_write)
2064{
2065 return is_write;
2066}
2067
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002068static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002069 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002070 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002071 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00002072};
2073
pbrook0f459d12008-06-09 00:20:13 +00002074/* Generate a debug exception if a watchpoint has been hit. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002075static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
pbrook0f459d12008-06-09 00:20:13 +00002076{
Andreas Färber93afead2013-08-26 03:41:01 +02002077 CPUState *cpu = current_cpu;
Sergey Fedorov568496c2016-02-11 11:17:32 +00002078 CPUClass *cc = CPU_GET_CLASS(cpu);
Andreas Färber93afead2013-08-26 03:41:01 +02002079 CPUArchState *env = cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00002080 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00002081 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00002082 CPUWatchpoint *wp;
Emilio G. Cota89fee742016-04-07 13:19:22 -04002083 uint32_t cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00002084
Andreas Färberff4700b2013-08-26 18:23:18 +02002085 if (cpu->watchpoint_hit) {
aliguori06d55cc2008-11-18 20:24:06 +00002086 /* We re-entered the check after replacing the TB. Now raise
2087 * the debug interrupt so that it will trigger after the
2088 * current instruction. */
Andreas Färber93afead2013-08-26 03:41:01 +02002089 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00002090 return;
2091 }
Andreas Färber93afead2013-08-26 03:41:01 +02002092 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Andreas Färberff4700b2013-08-26 18:23:18 +02002093 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Peter Maydell05068c02014-09-12 14:06:48 +01002094 if (cpu_watchpoint_address_matches(wp, vaddr, len)
2095 && (wp->flags & flags)) {
Peter Maydell08225672014-09-12 14:06:48 +01002096 if (flags == BP_MEM_READ) {
2097 wp->flags |= BP_WATCHPOINT_HIT_READ;
2098 } else {
2099 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
2100 }
2101 wp->hitaddr = vaddr;
Peter Maydell66b9b432015-04-26 16:49:24 +01002102 wp->hitattrs = attrs;
Andreas Färberff4700b2013-08-26 18:23:18 +02002103 if (!cpu->watchpoint_hit) {
Sergey Fedorov568496c2016-02-11 11:17:32 +00002104 if (wp->flags & BP_CPU &&
2105 !cc->debug_check_watchpoint(cpu, wp)) {
2106 wp->flags &= ~BP_WATCHPOINT_HIT;
2107 continue;
2108 }
Andreas Färberff4700b2013-08-26 18:23:18 +02002109 cpu->watchpoint_hit = wp;
KONRAD Frederica5e99822016-10-27 16:10:06 +01002110
2111 /* The tb_lock will be reset when cpu_loop_exit or
2112 * cpu_loop_exit_noexc longjmp back into the cpu_exec
2113 * main loop.
2114 */
2115 tb_lock();
Andreas Färber239c51a2013-09-01 17:12:23 +02002116 tb_check_watchpoint(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002117 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
Andreas Färber27103422013-08-26 08:31:06 +02002118 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +02002119 cpu_loop_exit(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002120 } else {
2121 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
Andreas Färber648f0342013-09-01 17:43:17 +02002122 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
Peter Maydell6886b982016-05-17 15:18:04 +01002123 cpu_loop_exit_noexc(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002124 }
aliguori06d55cc2008-11-18 20:24:06 +00002125 }
aliguori6e140f22008-11-18 20:37:55 +00002126 } else {
2127 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00002128 }
2129 }
2130}
2131
pbrook6658ffb2007-03-16 23:58:11 +00002132/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2133 so these check for a hit then pass through to the normal out-of-line
2134 phys routines. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002135static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
2136 unsigned size, MemTxAttrs attrs)
pbrook6658ffb2007-03-16 23:58:11 +00002137{
Peter Maydell66b9b432015-04-26 16:49:24 +01002138 MemTxResult res;
2139 uint64_t data;
Peter Maydell79ed0412016-01-21 14:15:06 +00002140 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2141 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
pbrook6658ffb2007-03-16 23:58:11 +00002142
Peter Maydell66b9b432015-04-26 16:49:24 +01002143 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
Avi Kivity1ec9b902012-01-02 12:47:48 +02002144 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04002145 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002146 data = address_space_ldub(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002147 break;
2148 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002149 data = address_space_lduw(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002150 break;
2151 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002152 data = address_space_ldl(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002153 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02002154 default: abort();
2155 }
Peter Maydell66b9b432015-04-26 16:49:24 +01002156 *pdata = data;
2157 return res;
2158}
2159
2160static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
2161 uint64_t val, unsigned size,
2162 MemTxAttrs attrs)
2163{
2164 MemTxResult res;
Peter Maydell79ed0412016-01-21 14:15:06 +00002165 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2166 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
Peter Maydell66b9b432015-04-26 16:49:24 +01002167
2168 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
2169 switch (size) {
2170 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002171 address_space_stb(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002172 break;
2173 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002174 address_space_stw(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002175 break;
2176 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002177 address_space_stl(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002178 break;
2179 default: abort();
2180 }
2181 return res;
pbrook6658ffb2007-03-16 23:58:11 +00002182}
2183
Avi Kivity1ec9b902012-01-02 12:47:48 +02002184static const MemoryRegionOps watch_mem_ops = {
Peter Maydell66b9b432015-04-26 16:49:24 +01002185 .read_with_attrs = watch_mem_read,
2186 .write_with_attrs = watch_mem_write,
Avi Kivity1ec9b902012-01-02 12:47:48 +02002187 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00002188};
pbrook6658ffb2007-03-16 23:58:11 +00002189
Peter Maydellf25a49e2015-04-26 16:49:24 +01002190static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2191 unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002192{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002193 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002194 uint8_t buf[8];
Peter Maydell5c9eb022015-04-26 16:49:24 +01002195 MemTxResult res;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002196
blueswir1db7b5422007-05-26 17:36:03 +00002197#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002198 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002199 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00002200#endif
Peter Maydell5c9eb022015-04-26 16:49:24 +01002201 res = address_space_read(subpage->as, addr + subpage->base,
2202 attrs, buf, len);
2203 if (res) {
2204 return res;
Peter Maydellf25a49e2015-04-26 16:49:24 +01002205 }
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002206 switch (len) {
2207 case 1:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002208 *data = ldub_p(buf);
2209 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002210 case 2:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002211 *data = lduw_p(buf);
2212 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002213 case 4:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002214 *data = ldl_p(buf);
2215 return MEMTX_OK;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002216 case 8:
Peter Maydellf25a49e2015-04-26 16:49:24 +01002217 *data = ldq_p(buf);
2218 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002219 default:
2220 abort();
2221 }
blueswir1db7b5422007-05-26 17:36:03 +00002222}
2223
Peter Maydellf25a49e2015-04-26 16:49:24 +01002224static MemTxResult subpage_write(void *opaque, hwaddr addr,
2225 uint64_t value, unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002226{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002227 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002228 uint8_t buf[8];
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002229
blueswir1db7b5422007-05-26 17:36:03 +00002230#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002231 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002232 " value %"PRIx64"\n",
2233 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00002234#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002235 switch (len) {
2236 case 1:
2237 stb_p(buf, value);
2238 break;
2239 case 2:
2240 stw_p(buf, value);
2241 break;
2242 case 4:
2243 stl_p(buf, value);
2244 break;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002245 case 8:
2246 stq_p(buf, value);
2247 break;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002248 default:
2249 abort();
2250 }
Peter Maydell5c9eb022015-04-26 16:49:24 +01002251 return address_space_write(subpage->as, addr + subpage->base,
2252 attrs, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00002253}
2254
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002255static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08002256 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002257{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002258 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002259#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002260 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002261 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002262#endif
2263
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002264 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08002265 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002266}
2267
Avi Kivity70c68e42012-01-02 12:32:48 +02002268static const MemoryRegionOps subpage_ops = {
Peter Maydellf25a49e2015-04-26 16:49:24 +01002269 .read_with_attrs = subpage_read,
2270 .write_with_attrs = subpage_write,
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002271 .impl.min_access_size = 1,
2272 .impl.max_access_size = 8,
2273 .valid.min_access_size = 1,
2274 .valid.max_access_size = 8,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002275 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02002276 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00002277};
2278
Anthony Liguoric227f092009-10-01 16:12:16 -05002279static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002280 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00002281{
2282 int idx, eidx;
2283
2284 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2285 return -1;
2286 idx = SUBPAGE_IDX(start);
2287 eidx = SUBPAGE_IDX(end);
2288#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002289 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2290 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00002291#endif
blueswir1db7b5422007-05-26 17:36:03 +00002292 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02002293 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00002294 }
2295
2296 return 0;
2297}
2298
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002299static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00002300{
Anthony Liguoric227f092009-10-01 16:12:16 -05002301 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00002302
Vijaya Kumar K2615fab2016-10-24 16:26:49 +01002303 mmio = g_malloc0(sizeof(subpage_t) + TARGET_PAGE_SIZE * sizeof(uint16_t));
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002304 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00002305 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002306 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002307 NULL, TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02002308 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00002309#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002310 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2311 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00002312#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02002313 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00002314
2315 return mmio;
2316}
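/* Illustrative sketch (not part of the build): a subpage covers a single
 * target page whose byte sub-ranges resolve to different
 * MemoryRegionSections. After subpage_init(), callers record section
 * indices per range; the index 5 below stands in for a hypothetical
 * phys_section_add() result:
 *
 *     subpage_t *sp = subpage_init(as, base);
 *     subpage_register(sp, 0x000, 0x7ff, 5);   // first half of the page
 *
 * subpage_read()/subpage_write() then bounce each access back into the
 * owning address space at sp->base + addr, where the dispatch tables
 * resolve the registered section.
 */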
2317
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002318static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2319 MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02002320{
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002321 assert(as);
Avi Kivity5312bd82012-02-12 18:32:55 +02002322 MemoryRegionSection section = {
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002323 .address_space = as,
Avi Kivity5312bd82012-02-12 18:32:55 +02002324 .mr = mr,
2325 .offset_within_address_space = 0,
2326 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002327 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02002328 };
2329
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002330 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02002331}
2332
Peter Maydella54c87b2016-01-21 14:15:05 +00002333MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
Avi Kivityaa102232012-03-08 17:06:55 +02002334{
Peter Maydella54c87b2016-01-21 14:15:05 +00002335 int asidx = cpu_asidx_from_attrs(cpu, attrs);
2336 CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
Peter Maydell32857f42015-10-01 15:29:50 +01002337 AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002338 MemoryRegionSection *sections = d->map.sections;
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002339
2340 return sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02002341}
2342
Avi Kivitye9179ce2009-06-14 11:38:52 +03002343static void io_mem_init(void)
2344{
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002345 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002346 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002347 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002348 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002349 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002350 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002351 NULL, UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03002352}
2353
Avi Kivityac1970f2012-10-03 16:22:53 +02002354static void mem_begin(MemoryListener *listener)
2355{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002356 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002357 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2358 uint16_t n;
2359
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002360 n = dummy_section(&d->map, as, &io_mem_unassigned);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002361 assert(n == PHYS_SECTION_UNASSIGNED);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002362 n = dummy_section(&d->map, as, &io_mem_notdirty);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002363 assert(n == PHYS_SECTION_NOTDIRTY);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002364 n = dummy_section(&d->map, as, &io_mem_rom);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002365 assert(n == PHYS_SECTION_ROM);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002366 n = dummy_section(&d->map, as, &io_mem_watch);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002367 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02002368
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02002369 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02002370 d->as = as;
2371 as->next_dispatch = d;
2372}
2373
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002374static void address_space_dispatch_free(AddressSpaceDispatch *d)
2375{
2376 phys_sections_free(&d->map);
2377 g_free(d);
2378}
2379
Paolo Bonzini00752702013-05-29 12:13:54 +02002380static void mem_commit(MemoryListener *listener)
2381{
2382 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02002383 AddressSpaceDispatch *cur = as->dispatch;
2384 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02002385
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002386 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02002387
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002388 atomic_rcu_set(&as->dispatch, next);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002389 if (cur) {
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002390 call_rcu(cur, address_space_dispatch_free, rcu);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002391 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02002392}
2393
Avi Kivity1d711482012-10-02 18:54:45 +02002394static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02002395{
Peter Maydell32857f42015-10-01 15:29:50 +01002396 CPUAddressSpace *cpuas;
2397 AddressSpaceDispatch *d;
Avi Kivity117712c2012-02-12 21:23:17 +02002398
2399 /* Since each CPU stores RAM addresses in its TLB cache, we must
2400 reset the modified entries. */
Peter Maydell32857f42015-10-01 15:29:50 +01002401 cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
2402 cpu_reloading_memory_map();
2403 /* The CPU and TLB are protected by the iothread lock.
2404 * We reload the dispatch pointer now because cpu_reloading_memory_map()
2405 * may have split the RCU critical section.
2406 */
2407 d = atomic_rcu_read(&cpuas->as->dispatch);
Alex Bennéef35e44e2016-10-21 16:34:18 +01002408 atomic_rcu_set(&cpuas->memory_dispatch, d);
Peter Maydell32857f42015-10-01 15:29:50 +01002409 tlb_flush(cpuas->cpu, 1);
Avi Kivity50c1e142012-02-08 21:36:02 +02002410}
2411
Avi Kivityac1970f2012-10-03 16:22:53 +02002412void address_space_init_dispatch(AddressSpace *as)
2413{
Paolo Bonzini00752702013-05-29 12:13:54 +02002414 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002415 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002416 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02002417 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02002418 .region_add = mem_add,
2419 .region_nop = mem_add,
2420 .priority = 0,
2421 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002422 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02002423}
2424
Paolo Bonzini6e48e8f2015-02-10 10:25:44 -07002425void address_space_unregister(AddressSpace *as)
2426{
2427 memory_listener_unregister(&as->dispatch_listener);
2428}
2429
Avi Kivity83f3c252012-10-07 12:59:55 +02002430void address_space_destroy_dispatch(AddressSpace *as)
2431{
2432 AddressSpaceDispatch *d = as->dispatch;
2433
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002434 atomic_rcu_set(&as->dispatch, NULL);
2435 if (d) {
2436 call_rcu(d, address_space_dispatch_free, rcu);
2437 }
Avi Kivity83f3c252012-10-07 12:59:55 +02002438}
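/* The dispatch pointer follows the usual RCU publish/read pattern; a
 * minimal sketch of the two sides (readers never block the writer):
 *
 *     // reader, e.g. on a memory access path
 *     rcu_read_lock();
 *     AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
 *     // ... walk d->phys_map ...
 *     rcu_read_unlock();
 *
 *     // writer, as in mem_commit() above
 *     atomic_rcu_set(&as->dispatch, next);
 *     call_rcu(cur, address_space_dispatch_free, rcu);  // freed after the grace period
 */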
2439
Avi Kivity62152b82011-07-26 14:26:14 +03002440static void memory_map_init(void)
2441{
Anthony Liguori7267c092011-08-20 22:09:37 -05002442 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01002443
Paolo Bonzini57271d62013-11-07 17:14:37 +01002444 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002445 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03002446
Anthony Liguori7267c092011-08-20 22:09:37 -05002447 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b72013-09-02 18:43:30 +02002448 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2449 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002450 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity62152b82011-07-26 14:26:14 +03002451}
2452
2453MemoryRegion *get_system_memory(void)
2454{
2455 return system_memory;
2456}
2457
Avi Kivity309cb472011-08-08 16:09:03 +03002458MemoryRegion *get_system_io(void)
2459{
2460 return system_io;
2461}
2462
pbrooke2eef172008-06-08 01:09:01 +00002463#endif /* !defined(CONFIG_USER_ONLY) */
2464
bellard13eb76e2004-01-24 15:23:36 +00002465/* physical memory access (slow version, mainly for debug) */
2466#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02002467int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00002468 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002469{
2470 int l, flags;
2471 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00002472 void * p;
bellard13eb76e2004-01-24 15:23:36 +00002473
2474 while (len > 0) {
2475 page = addr & TARGET_PAGE_MASK;
2476 l = (page + TARGET_PAGE_SIZE) - addr;
2477 if (l > len)
2478 l = len;
2479 flags = page_get_flags(page);
2480 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00002481 return -1;
bellard13eb76e2004-01-24 15:23:36 +00002482 if (is_write) {
2483 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00002484 return -1;
bellard579a97f2007-11-11 14:26:47 +00002485 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002486 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00002487 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002488 memcpy(p, buf, l);
2489 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00002490 } else {
2491 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00002492 return -1;
bellard579a97f2007-11-11 14:26:47 +00002493 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002494 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00002495 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002496 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00002497 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00002498 }
2499 len -= l;
2500 buf += l;
2501 addr += l;
2502 }
Paul Brooka68fe892010-03-01 00:08:59 +00002503 return 0;
bellard13eb76e2004-01-24 15:23:36 +00002504}
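/* Typical use (illustrative, e.g. from a gdbstub-style debugger path):
 *
 *     uint32_t insn;
 *     if (cpu_memory_rw_debug(cpu, pc, (uint8_t *)&insn,
 *                             sizeof(insn), 0) < 0) {
 *         // page not mapped, or lacks the required PAGE_READ permission
 *     }
 *
 * A non-zero is_write argument performs the symmetric store, subject to
 * PAGE_WRITE.
 */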
bellard8df1cd02005-01-28 22:37:22 +00002505
bellard13eb76e2004-01-24 15:23:36 +00002506#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002507
Paolo Bonzini845b6212015-03-23 11:45:53 +01002508static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
Avi Kivitya8170e52012-10-23 12:30:10 +02002509 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002510{
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002511 uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002512 addr += memory_region_get_ram_addr(mr);
2513
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002514 /* No early return if dirty_log_mask is or becomes 0, because
2515 * cpu_physical_memory_set_dirty_range will still call
2516 * xen_modified_memory.
2517 */
2518 if (dirty_log_mask) {
2519 dirty_log_mask =
2520 cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002521 }
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002522 if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
Alex Bennéeba051fb2016-10-27 16:10:16 +01002523 tb_lock();
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002524 tb_invalidate_phys_range(addr, addr + length);
Alex Bennéeba051fb2016-10-27 16:10:16 +01002525 tb_unlock();
Paolo Bonzinie87f7772015-03-25 15:21:39 +01002526 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
2527 }
2528 cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002529}
2530
Richard Henderson23326162013-07-08 14:55:59 -07002531static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02002532{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02002533 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07002534
2535 /* Regions are assumed to support 1-4 byte accesses unless
2536 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07002537 if (access_size_max == 0) {
2538 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002539 }
Richard Henderson23326162013-07-08 14:55:59 -07002540
2541 /* Bound the maximum access by the alignment of the address. */
2542 if (!mr->ops->impl.unaligned) {
2543 unsigned align_size_max = addr & -addr;
2544 if (align_size_max != 0 && align_size_max < access_size_max) {
2545 access_size_max = align_size_max;
2546 }
2547 }
2548
2549 /* Don't attempt accesses larger than the maximum. */
2550 if (l > access_size_max) {
2551 l = access_size_max;
2552 }
Peter Maydell6554f5c2015-07-24 13:33:10 +01002553 l = pow2floor(l);
Richard Henderson23326162013-07-08 14:55:59 -07002554
2555 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002556}
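/* Worked example (hypothetical region with valid.max_access_size == 4
 * and no impl.unaligned): an 8-byte request at addr 0x1006 is first
 * capped to 4 by the region, then to (0x1006 & -0x1006) == 2 by the
 * alignment bound, and pow2floor() leaves l == 2; the caller's dispatch
 * loop then continues with the remaining bytes in 4- and 2-byte pieces.
 */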
2557
Jan Kiszka4840f102015-06-18 18:47:22 +02002558static bool prepare_mmio_access(MemoryRegion *mr)
Paolo Bonzini125b3802015-06-18 18:47:21 +02002559{
Jan Kiszka4840f102015-06-18 18:47:22 +02002560 bool unlocked = !qemu_mutex_iothread_locked();
2561 bool release_lock = false;
2562
2563 if (unlocked && mr->global_locking) {
2564 qemu_mutex_lock_iothread();
2565 unlocked = false;
2566 release_lock = true;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002567 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002568 if (mr->flush_coalesced_mmio) {
2569 if (unlocked) {
2570 qemu_mutex_lock_iothread();
2571 }
2572 qemu_flush_coalesced_mmio_buffer();
2573 if (unlocked) {
2574 qemu_mutex_unlock_iothread();
2575 }
2576 }
2577
2578 return release_lock;
Paolo Bonzini125b3802015-06-18 18:47:21 +02002579}
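/* Usage pattern (sketch): callers accumulate the return value so the
 * iothread lock is dropped exactly once per dispatch loop:
 *
 *     bool release_lock = false;
 *     ...
 *     release_lock |= prepare_mmio_access(mr);
 *     memory_region_dispatch_write(mr, addr1, val, 4, attrs);
 *     ...
 *     if (release_lock) {
 *         qemu_mutex_unlock_iothread();
 *     }
 */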
2580
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002581/* Called within RCU critical section. */
2582static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
2583 MemTxAttrs attrs,
2584 const uint8_t *buf,
2585 int len, hwaddr addr1,
2586 hwaddr l, MemoryRegion *mr)
bellard13eb76e2004-01-24 15:23:36 +00002587{
bellard13eb76e2004-01-24 15:23:36 +00002588 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002589 uint64_t val;
Peter Maydell3b643492015-04-26 16:49:23 +01002590 MemTxResult result = MEMTX_OK;
Jan Kiszka4840f102015-06-18 18:47:22 +02002591 bool release_lock = false;
ths3b46e622007-09-17 08:09:54 +00002592
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002593 for (;;) {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002594 if (!memory_access_is_direct(mr, true)) {
2595 release_lock |= prepare_mmio_access(mr);
2596 l = memory_access_size(mr, l, addr1);
2597 /* XXX: could force current_cpu to NULL to avoid
2598 potential bugs */
2599 switch (l) {
2600 case 8:
2601 /* 64 bit write access */
2602 val = ldq_p(buf);
2603 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2604 attrs);
2605 break;
2606 case 4:
2607 /* 32 bit write access */
2608 val = ldl_p(buf);
2609 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2610 attrs);
2611 break;
2612 case 2:
2613 /* 16 bit write access */
2614 val = lduw_p(buf);
2615 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2616 attrs);
2617 break;
2618 case 1:
2619 /* 8 bit write access */
2620 val = ldub_p(buf);
2621 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2622 attrs);
2623 break;
2624 default:
2625 abort();
bellard13eb76e2004-01-24 15:23:36 +00002626 }
2627 } else {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002628 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002629 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002630 memcpy(ptr, buf, l);
2631 invalidate_and_set_dirty(mr, addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002632 }
Jan Kiszka4840f102015-06-18 18:47:22 +02002633
2634 if (release_lock) {
2635 qemu_mutex_unlock_iothread();
2636 release_lock = false;
2637 }
2638
bellard13eb76e2004-01-24 15:23:36 +00002639 len -= l;
2640 buf += l;
2641 addr += l;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002642
2643 if (!len) {
2644 break;
2645 }
2646
2647 l = len;
2648 mr = address_space_translate(as, addr, &addr1, &l, true);
bellard13eb76e2004-01-24 15:23:36 +00002649 }
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002650
Peter Maydell3b643492015-04-26 16:49:23 +01002651 return result;
bellard13eb76e2004-01-24 15:23:36 +00002652}
bellard8df1cd02005-01-28 22:37:22 +00002653
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002654MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2655 const uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002656{
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002657 hwaddr l;
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002658 hwaddr addr1;
2659 MemoryRegion *mr;
2660 MemTxResult result = MEMTX_OK;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002661
2662 if (len > 0) {
2663 rcu_read_lock();
2664 l = len;
2665 mr = address_space_translate(as, addr, &addr1, &l, true);
2666 result = address_space_write_continue(as, addr, attrs, buf, len,
2667 addr1, l, mr);
2668 rcu_read_unlock();
2669 }
2670
2671 return result;
2672}
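/* Illustrative call (a device model writing guest RAM; the address and
 * payload here are made up):
 *
 *     uint8_t resp[4] = { 0xde, 0xad, 0xbe, 0xef };
 *     MemTxResult r = address_space_write(&address_space_memory, 0x1000,
 *                                         MEMTXATTRS_UNSPECIFIED,
 *                                         resp, sizeof(resp));
 *     if (r != MEMTX_OK) {
 *         // the target signalled a decode error or bus fault
 *     }
 */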
2673
2674/* Called within RCU critical section. */
2675MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
2676 MemTxAttrs attrs, uint8_t *buf,
2677 int len, hwaddr addr1, hwaddr l,
2678 MemoryRegion *mr)
2679{
2680 uint8_t *ptr;
2681 uint64_t val;
2682 MemTxResult result = MEMTX_OK;
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002683 bool release_lock = false;
2684
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002685 for (;;) {
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002686 if (!memory_access_is_direct(mr, false)) {
2687 /* I/O case */
2688 release_lock |= prepare_mmio_access(mr);
2689 l = memory_access_size(mr, l, addr1);
2690 switch (l) {
2691 case 8:
2692 /* 64 bit read access */
2693 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2694 attrs);
2695 stq_p(buf, val);
2696 break;
2697 case 4:
2698 /* 32 bit read access */
2699 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2700 attrs);
2701 stl_p(buf, val);
2702 break;
2703 case 2:
2704 /* 16 bit read access */
2705 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2706 attrs);
2707 stw_p(buf, val);
2708 break;
2709 case 1:
2710 /* 8 bit read access */
2711 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2712 attrs);
2713 stb_p(buf, val);
2714 break;
2715 default:
2716 abort();
2717 }
2718 } else {
2719 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002720 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002721 memcpy(buf, ptr, l);
2722 }
2723
2724 if (release_lock) {
2725 qemu_mutex_unlock_iothread();
2726 release_lock = false;
2727 }
2728
2729 len -= l;
2730 buf += l;
2731 addr += l;
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002732
2733 if (!len) {
2734 break;
2735 }
2736
2737 l = len;
2738 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002739 }
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002740
2741 return result;
2742}
2743
Paolo Bonzini3cc8f882015-12-09 10:34:13 +01002744MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
2745 MemTxAttrs attrs, uint8_t *buf, int len)
Paolo Bonzinia203ac72015-12-09 10:18:57 +01002746{
2747 hwaddr l;
2748 hwaddr addr1;
2749 MemoryRegion *mr;
2750 MemTxResult result = MEMTX_OK;
2751
2752 if (len > 0) {
2753 rcu_read_lock();
2754 l = len;
2755 mr = address_space_translate(as, addr, &addr1, &l, false);
2756 result = address_space_read_continue(as, addr, attrs, buf, len,
2757 addr1, l, mr);
2758 rcu_read_unlock();
2759 }
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002760
2761 return result;
Avi Kivityac1970f2012-10-03 16:22:53 +02002762}
2763
Paolo Bonzinieb7eeb82015-12-09 10:06:31 +01002764MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2765 uint8_t *buf, int len, bool is_write)
2766{
2767 if (is_write) {
2768 return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
2769 } else {
2770 return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
2771 }
2772}
Avi Kivityac1970f2012-10-03 16:22:53 +02002773
Avi Kivitya8170e52012-10-23 12:30:10 +02002774void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002775 int len, int is_write)
2776{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002777 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2778 buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002779}
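/* Legacy convenience wrapper: equivalent to address_space_rw() on
 * &address_space_memory with unspecified transaction attributes, with
 * the MemTxResult discarded. Sketch of the two directions:
 *
 *     cpu_physical_memory_rw(addr, buf, len, 1);   // write
 *     cpu_physical_memory_rw(addr, buf, len, 0);   // read
 */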
2780
Alexander Graf582b55a2013-12-11 14:17:44 +01002781enum write_rom_type {
2782 WRITE_DATA,
2783 FLUSH_CACHE,
2784};
2785
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002786static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
Alexander Graf582b55a2013-12-11 14:17:44 +01002787 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
bellardd0ecd2a2006-04-23 17:14:48 +00002788{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002789 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002790 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002791 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002792 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002793
Paolo Bonzini41063e12015-03-18 14:21:43 +01002794 rcu_read_lock();
bellardd0ecd2a2006-04-23 17:14:48 +00002795 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002796 l = len;
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002797 mr = address_space_translate(as, addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002798
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002799 if (!(memory_region_is_ram(mr) ||
2800 memory_region_is_romd(mr))) {
Paolo Bonzinib242e0e2015-07-04 00:24:51 +02002801 l = memory_access_size(mr, l, addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00002802 } else {
bellardd0ecd2a2006-04-23 17:14:48 +00002803 /* ROM/RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002804 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf582b55a2013-12-11 14:17:44 +01002805 switch (type) {
2806 case WRITE_DATA:
2807 memcpy(ptr, buf, l);
Paolo Bonzini845b6212015-03-23 11:45:53 +01002808 invalidate_and_set_dirty(mr, addr1, l);
Alexander Graf582b55a2013-12-11 14:17:44 +01002809 break;
2810 case FLUSH_CACHE:
2811 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2812 break;
2813 }
bellardd0ecd2a2006-04-23 17:14:48 +00002814 }
2815 len -= l;
2816 buf += l;
2817 addr += l;
2818 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002819 rcu_read_unlock();
bellardd0ecd2a2006-04-23 17:14:48 +00002820}
2821
Alexander Graf582b55a2013-12-11 14:17:44 +01002822/* used for ROM loading : can write in RAM and ROM */
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002823void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
Alexander Graf582b55a2013-12-11 14:17:44 +01002824 const uint8_t *buf, int len)
2825{
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002826 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
Alexander Graf582b55a2013-12-11 14:17:44 +01002827}
2828
2829void cpu_flush_icache_range(hwaddr start, int len)
2830{
2831 /*
2832 * This function should do the same thing as an icache flush that was
2833 * triggered from within the guest. For TCG we are always cache coherent,
2834 * so there is no need to flush anything. For KVM / Xen we need to flush
2835 * the host's instruction cache at least.
2836 */
2837 if (tcg_enabled()) {
2838 return;
2839 }
2840
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002841 cpu_physical_memory_write_rom_internal(&address_space_memory,
2842 start, NULL, len, FLUSH_CACHE);
Alexander Graf582b55a2013-12-11 14:17:44 +01002843}
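/* Typical pairing when loading executable code into guest memory
 * (illustrative address and length, not taken from a real board):
 *
 *     cpu_physical_memory_write_rom(&address_space_memory, 0x40000000,
 *                                   blob, blob_len);
 *     cpu_flush_icache_range(0x40000000, blob_len);
 *
 * Under TCG the flush is a no-op; under KVM/Xen it keeps the host
 * instruction cache coherent with the freshly written memory.
 */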
2844
aliguori6d16c2f2009-01-22 16:59:11 +00002845typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002846 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002847 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002848 hwaddr addr;
2849 hwaddr len;
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002850 bool in_use;
aliguori6d16c2f2009-01-22 16:59:11 +00002851} BounceBuffer;
2852
2853static BounceBuffer bounce;
2854
aliguoriba223c22009-01-22 16:59:16 +00002855typedef struct MapClient {
Fam Zhenge95205e2015-03-16 17:03:37 +08002856 QEMUBH *bh;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002857 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002858} MapClient;
2859
Fam Zheng38e047b2015-03-16 17:03:35 +08002860QemuMutex map_client_list_lock;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002861static QLIST_HEAD(map_client_list, MapClient) map_client_list
2862 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002863
Fam Zhenge95205e2015-03-16 17:03:37 +08002864static void cpu_unregister_map_client_do(MapClient *client)
aliguoriba223c22009-01-22 16:59:16 +00002865{
Blue Swirl72cf2d42009-09-12 07:36:22 +00002866 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002867 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002868}
2869
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002870static void cpu_notify_map_clients_locked(void)
aliguoriba223c22009-01-22 16:59:16 +00002871{
2872 MapClient *client;
2873
Blue Swirl72cf2d42009-09-12 07:36:22 +00002874 while (!QLIST_EMPTY(&map_client_list)) {
2875 client = QLIST_FIRST(&map_client_list);
Fam Zhenge95205e2015-03-16 17:03:37 +08002876 qemu_bh_schedule(client->bh);
2877 cpu_unregister_map_client_do(client);
aliguoriba223c22009-01-22 16:59:16 +00002878 }
2879}
2880
Fam Zhenge95205e2015-03-16 17:03:37 +08002881void cpu_register_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002882{
2883 MapClient *client = g_malloc(sizeof(*client));
2884
Fam Zheng38e047b2015-03-16 17:03:35 +08002885 qemu_mutex_lock(&map_client_list_lock);
Fam Zhenge95205e2015-03-16 17:03:37 +08002886 client->bh = bh;
bellardd0ecd2a2006-04-23 17:14:48 +00002887 QLIST_INSERT_HEAD(&map_client_list, client, link);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002888 if (!atomic_read(&bounce.in_use)) {
2889 cpu_notify_map_clients_locked();
2890 }
Fam Zheng38e047b2015-03-16 17:03:35 +08002891 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002892}
2893
Fam Zheng38e047b2015-03-16 17:03:35 +08002894void cpu_exec_init_all(void)
2895{
2896 qemu_mutex_init(&ram_list.mutex);
Peter Maydell20bccb82016-10-24 16:26:49 +01002897 /* The data structures we set up here depend on knowing the page size,
2898 * so no more changes can be made after this point.
2899 * In an ideal world, nothing we did before we had finished the
2900 * machine setup would care about the target page size, and we could
2901 * do this much later, rather than requiring board models to state
2902 * up front what their requirements are.
2903 */
2904 finalize_target_page_bits();
Fam Zheng38e047b2015-03-16 17:03:35 +08002905 io_mem_init();
Paolo Bonzini680a4782015-11-02 09:23:52 +01002906 memory_map_init();
Fam Zheng38e047b2015-03-16 17:03:35 +08002907 qemu_mutex_init(&map_client_list_lock);
2908}
2909
Fam Zhenge95205e2015-03-16 17:03:37 +08002910void cpu_unregister_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002911{
Fam Zhenge95205e2015-03-16 17:03:37 +08002912 MapClient *client;
bellardd0ecd2a2006-04-23 17:14:48 +00002913
Fam Zhenge95205e2015-03-16 17:03:37 +08002914 qemu_mutex_lock(&map_client_list_lock);
2915 QLIST_FOREACH(client, &map_client_list, link) {
2916 if (client->bh == bh) {
2917 cpu_unregister_map_client_do(client);
2918 break;
2919 }
2920 }
2921 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002922}
2923
2924static void cpu_notify_map_clients(void)
2925{
Fam Zheng38e047b2015-03-16 17:03:35 +08002926 qemu_mutex_lock(&map_client_list_lock);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002927 cpu_notify_map_clients_locked();
Fam Zheng38e047b2015-03-16 17:03:35 +08002928 qemu_mutex_unlock(&map_client_list_lock);
aliguori6d16c2f2009-01-22 16:59:11 +00002929}
2930
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002931bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2932{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002933 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002934 hwaddr l, xlat;
2935
Paolo Bonzini41063e12015-03-18 14:21:43 +01002936 rcu_read_lock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002937 while (len > 0) {
2938 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002939 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2940 if (!memory_access_is_direct(mr, is_write)) {
2941 l = memory_access_size(mr, l, addr);
2942 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002943 return false;
2944 }
2945 }
2946
2947 len -= l;
2948 addr += l;
2949 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002950 rcu_read_unlock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002951 return true;
2952}
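/* Example probe (sketch): a device can reject a DMA descriptor up front
 * instead of discovering the fault mid-transfer; desc is hypothetical:
 *
 *     if (!address_space_access_valid(&address_space_memory,
 *                                     desc.addr, desc.len, true)) {
 *         // raise a device-specific DMA error
 *     }
 */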
2953
aliguori6d16c2f2009-01-22 16:59:11 +00002954/* Map a physical memory region into a host virtual address.
2955 * May map a subset of the requested range, given by and returned in *plen.
2956 * May return NULL if resources needed to perform the mapping are exhausted.
2957 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002958 * Use cpu_register_map_client() to know when retrying the map operation is
2959 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002960 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002961void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002962 hwaddr addr,
2963 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002964 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002965{
Avi Kivitya8170e52012-10-23 12:30:10 +02002966 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002967 hwaddr done = 0;
2968 hwaddr l, xlat, base;
2969 MemoryRegion *mr, *this_mr;
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01002970 void *ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00002971
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002972 if (len == 0) {
2973 return NULL;
2974 }
aliguori6d16c2f2009-01-22 16:59:11 +00002975
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002976 l = len;
Paolo Bonzini41063e12015-03-18 14:21:43 +01002977 rcu_read_lock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002978 mr = address_space_translate(as, addr, &xlat, &l, is_write);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002979
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002980 if (!memory_access_is_direct(mr, is_write)) {
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002981 if (atomic_xchg(&bounce.in_use, true)) {
Paolo Bonzini41063e12015-03-18 14:21:43 +01002982 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002983 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002984 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002985 /* Avoid unbounded allocations */
2986 l = MIN(l, TARGET_PAGE_SIZE);
2987 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002988 bounce.addr = addr;
2989 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002990
2991 memory_region_ref(mr);
2992 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002993 if (!is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002994 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2995 bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002996 }
aliguori6d16c2f2009-01-22 16:59:11 +00002997
Paolo Bonzini41063e12015-03-18 14:21:43 +01002998 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002999 *plen = l;
3000 return bounce.buffer;
3001 }
3002
3003 base = xlat;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02003004
3005 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00003006 len -= l;
3007 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02003008 done += l;
3009 if (len == 0) {
3010 break;
3011 }
3012
3013 l = len;
3014 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
3015 if (this_mr != mr || xlat != base + done) {
3016 break;
3017 }
aliguori6d16c2f2009-01-22 16:59:11 +00003018 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02003019
Paolo Bonzinid3e71552013-06-28 17:33:29 +02003020 memory_region_ref(mr);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02003021 *plen = done;
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003022 ptr = qemu_ram_ptr_length(mr->ram_block, base, plen);
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01003023 rcu_read_unlock();
3024
3025 return ptr;
aliguori6d16c2f2009-01-22 16:59:11 +00003026}
3027
Avi Kivityac1970f2012-10-03 16:22:53 +02003028/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00003029 * Will also mark the memory as dirty if is_write == 1. access_len gives
3030 * the amount of memory that was actually read or written by the caller.
3031 */
Avi Kivitya8170e52012-10-23 12:30:10 +02003032void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
3033 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00003034{
3035 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02003036 MemoryRegion *mr;
3037 ram_addr_t addr1;
3038
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01003039 mr = memory_region_from_host(buffer, &addr1);
Paolo Bonzinid3e71552013-06-28 17:33:29 +02003040 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00003041 if (is_write) {
Paolo Bonzini845b6212015-03-23 11:45:53 +01003042 invalidate_and_set_dirty(mr, addr1, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00003043 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003044 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003045 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003046 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02003047 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00003048 return;
3049 }
3050 if (is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01003051 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
3052 bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00003053 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00003054 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003055 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02003056 memory_region_unref(bounce.mr);
Fam Zhengc2cba0f2015-03-16 17:03:33 +08003057 atomic_mb_set(&bounce.in_use, false);
aliguoriba223c22009-01-22 16:59:16 +00003058 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00003059}
bellardd0ecd2a2006-04-23 17:14:48 +00003060
Avi Kivitya8170e52012-10-23 12:30:10 +02003061void *cpu_physical_memory_map(hwaddr addr,
3062 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02003063 int is_write)
3064{
3065 return address_space_map(&address_space_memory, addr, plen, is_write);
3066}
3067
Avi Kivitya8170e52012-10-23 12:30:10 +02003068void cpu_physical_memory_unmap(void *buffer, hwaddr len,
3069 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02003070{
3071 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
3072}
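/* End-to-end sketch of the zero-copy path with the bounce-buffer
 * fallback (names prefixed my_ are hypothetical):
 *
 *     hwaddr len = size;
 *     void *p = address_space_map(as, addr, &len, true);
 *     if (!p) {
 *         // bounce buffer busy: register a BH and retry from there
 *         cpu_register_map_client(my_retry_bh);
 *         return;
 *     }
 *     memcpy(p, src, len);                      // len may be < size
 *     address_space_unmap(as, p, len, true, len);
 *
 * Only one bounce buffer exists, so mappings that cross into MMIO can
 * fail transiently; direct RAM mappings never take that path.
 */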
3073
bellard8df1cd02005-01-28 22:37:22 +00003074/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003075static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
3076 MemTxAttrs attrs,
3077 MemTxResult *result,
3078 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003079{
bellard8df1cd02005-01-28 22:37:22 +00003080 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02003081 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003082 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003083 hwaddr l = 4;
3084 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003085 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003086 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003087
Paolo Bonzini41063e12015-03-18 14:21:43 +01003088 rcu_read_lock();
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003089 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003090 if (l < 4 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003091 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003092
bellard8df1cd02005-01-28 22:37:22 +00003093 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003094 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003095#if defined(TARGET_WORDS_BIGENDIAN)
3096 if (endian == DEVICE_LITTLE_ENDIAN) {
3097 val = bswap32(val);
3098 }
3099#else
3100 if (endian == DEVICE_BIG_ENDIAN) {
3101 val = bswap32(val);
3102 }
3103#endif
bellard8df1cd02005-01-28 22:37:22 +00003104 } else {
3105 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003106 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003107 switch (endian) {
3108 case DEVICE_LITTLE_ENDIAN:
3109 val = ldl_le_p(ptr);
3110 break;
3111 case DEVICE_BIG_ENDIAN:
3112 val = ldl_be_p(ptr);
3113 break;
3114 default:
3115 val = ldl_p(ptr);
3116 break;
3117 }
Peter Maydell50013112015-04-26 16:49:24 +01003118 r = MEMTX_OK;
3119 }
3120 if (result) {
3121 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003122 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003123 if (release_lock) {
3124 qemu_mutex_unlock_iothread();
3125 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003126 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003127 return val;
3128}
3129
Peter Maydell50013112015-04-26 16:49:24 +01003130uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
3131 MemTxAttrs attrs, MemTxResult *result)
3132{
3133 return address_space_ldl_internal(as, addr, attrs, result,
3134 DEVICE_NATIVE_ENDIAN);
3135}
3136
3137uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
3138 MemTxAttrs attrs, MemTxResult *result)
3139{
3140 return address_space_ldl_internal(as, addr, attrs, result,
3141 DEVICE_LITTLE_ENDIAN);
3142}
3143
3144uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
3145 MemTxAttrs attrs, MemTxResult *result)
3146{
3147 return address_space_ldl_internal(as, addr, attrs, result,
3148 DEVICE_BIG_ENDIAN);
3149}
3150
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003151uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003152{
Peter Maydell50013112015-04-26 16:49:24 +01003153 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003154}
3155
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003156uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003157{
Peter Maydell50013112015-04-26 16:49:24 +01003158 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003159}
3160
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01003161uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003162{
Peter Maydell50013112015-04-26 16:49:24 +01003163 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003164}
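/* Example (illustrative): reading a little-endian 32-bit field from a
 * guest-resident descriptor regardless of host byte order; desc_addr is
 * hypothetical:
 *
 *     uint32_t flags = ldl_le_phys(&address_space_memory, desc_addr);
 *
 * The _le/_be variants byte-swap as needed; plain ldl_phys() uses the
 * target's native endianness.
 */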
3165
bellard84b7b8e2005-11-28 21:19:04 +00003166/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003167static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
3168 MemTxAttrs attrs,
3169 MemTxResult *result,
3170 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00003171{
bellard84b7b8e2005-11-28 21:19:04 +00003172 uint8_t *ptr;
3173 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003174 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003175 hwaddr l = 8;
3176 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003177 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003178 bool release_lock = false;
bellard84b7b8e2005-11-28 21:19:04 +00003179
Paolo Bonzini41063e12015-03-18 14:21:43 +01003180 rcu_read_lock();
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003181 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003182 false);
3183 if (l < 8 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003184 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003185
bellard84b7b8e2005-11-28 21:19:04 +00003186 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003187 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
Paolo Bonzini968a5622013-05-24 17:58:37 +02003188#if defined(TARGET_WORDS_BIGENDIAN)
3189 if (endian == DEVICE_LITTLE_ENDIAN) {
3190 val = bswap64(val);
3191 }
3192#else
3193 if (endian == DEVICE_BIG_ENDIAN) {
3194 val = bswap64(val);
3195 }
3196#endif
bellard84b7b8e2005-11-28 21:19:04 +00003197 } else {
3198 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003199 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003200 switch (endian) {
3201 case DEVICE_LITTLE_ENDIAN:
3202 val = ldq_le_p(ptr);
3203 break;
3204 case DEVICE_BIG_ENDIAN:
3205 val = ldq_be_p(ptr);
3206 break;
3207 default:
3208 val = ldq_p(ptr);
3209 break;
3210 }
Peter Maydell50013112015-04-26 16:49:24 +01003211 r = MEMTX_OK;
3212 }
3213 if (result) {
3214 *result = r;
bellard84b7b8e2005-11-28 21:19:04 +00003215 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003216 if (release_lock) {
3217 qemu_mutex_unlock_iothread();
3218 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003219 rcu_read_unlock();
bellard84b7b8e2005-11-28 21:19:04 +00003220 return val;
3221}
3222
Peter Maydell50013112015-04-26 16:49:24 +01003223uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
3224 MemTxAttrs attrs, MemTxResult *result)
3225{
3226 return address_space_ldq_internal(as, addr, attrs, result,
3227 DEVICE_NATIVE_ENDIAN);
3228}
3229
3230uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
3231 MemTxAttrs attrs, MemTxResult *result)
3232{
3233 return address_space_ldq_internal(as, addr, attrs, result,
3234 DEVICE_LITTLE_ENDIAN);
3235}
3236
3237uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
3238 MemTxAttrs attrs, MemTxResult *result)
3239{
3240 return address_space_ldq_internal(as, addr, attrs, result,
3241 DEVICE_BIG_ENDIAN);
3242}
3243
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003244uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003245{
Peter Maydell50013112015-04-26 16:49:24 +01003246 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003247}
3248
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003249uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003250{
Peter Maydell50013112015-04-26 16:49:24 +01003251 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003252}
3253
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10003254uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003255{
Peter Maydell50013112015-04-26 16:49:24 +01003256 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003257}
3258
bellardaab33092005-10-30 20:48:42 +00003259/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003260uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
3261 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003262{
3263 uint8_t val;
Peter Maydell50013112015-04-26 16:49:24 +01003264 MemTxResult r;
3265
3266 r = address_space_rw(as, addr, attrs, &val, 1, 0);
3267 if (result) {
3268 *result = r;
3269 }
bellardaab33092005-10-30 20:48:42 +00003270 return val;
3271}
3272
Peter Maydell50013112015-04-26 16:49:24 +01003273uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
3274{
3275 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
3276}
3277
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003278/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003279static inline uint32_t address_space_lduw_internal(AddressSpace *as,
3280 hwaddr addr,
3281 MemTxAttrs attrs,
3282 MemTxResult *result,
3283 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003284{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003285 uint8_t *ptr;
3286 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003287 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003288 hwaddr l = 2;
3289 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003290 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003291 bool release_lock = false;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003292
Paolo Bonzini41063e12015-03-18 14:21:43 +01003293 rcu_read_lock();
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003294 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003295 false);
3296 if (l < 2 || !memory_access_is_direct(mr, false)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003297 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003298
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003299 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01003300 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003301#if defined(TARGET_WORDS_BIGENDIAN)
3302 if (endian == DEVICE_LITTLE_ENDIAN) {
3303 val = bswap16(val);
3304 }
3305#else
3306 if (endian == DEVICE_BIG_ENDIAN) {
3307 val = bswap16(val);
3308 }
3309#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003310 } else {
3311 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003312 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003313 switch (endian) {
3314 case DEVICE_LITTLE_ENDIAN:
3315 val = lduw_le_p(ptr);
3316 break;
3317 case DEVICE_BIG_ENDIAN:
3318 val = lduw_be_p(ptr);
3319 break;
3320 default:
3321 val = lduw_p(ptr);
3322 break;
3323 }
Peter Maydell50013112015-04-26 16:49:24 +01003324 r = MEMTX_OK;
3325 }
3326 if (result) {
3327 *result = r;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003328 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003329 if (release_lock) {
3330 qemu_mutex_unlock_iothread();
3331 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003332 rcu_read_unlock();
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003333 return val;
bellardaab33092005-10-30 20:48:42 +00003334}
3335
Peter Maydell50013112015-04-26 16:49:24 +01003336uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
3337 MemTxAttrs attrs, MemTxResult *result)
3338{
3339 return address_space_lduw_internal(as, addr, attrs, result,
3340 DEVICE_NATIVE_ENDIAN);
3341}
3342
3343uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
3344 MemTxAttrs attrs, MemTxResult *result)
3345{
3346 return address_space_lduw_internal(as, addr, attrs, result,
3347 DEVICE_LITTLE_ENDIAN);
3348}
3349
3350uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
3351 MemTxAttrs attrs, MemTxResult *result)
3352{
3353 return address_space_lduw_internal(as, addr, attrs, result,
3354 DEVICE_BIG_ENDIAN);
3355}
3356
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003357uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003358{
Peter Maydell50013112015-04-26 16:49:24 +01003359 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003360}
3361
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003362uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003363{
Peter Maydell50013112015-04-26 16:49:24 +01003364 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003365}
3366
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003367uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003368{
Peter Maydell50013112015-04-26 16:49:24 +01003369 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003370}
3371
bellard8df1cd02005-01-28 22:37:22 +00003372/* warning: addr must be aligned. The RAM page is not marked as dirty,
3373 and the code inside it is not invalidated. This is useful when the
3374 dirty bits are used to track modified PTEs */
Peter Maydell50013112015-04-26 16:49:24 +01003375void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3376 MemTxAttrs attrs, MemTxResult *result)
bellard8df1cd02005-01-28 22:37:22 +00003377{
bellard8df1cd02005-01-28 22:37:22 +00003378 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003379 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003380 hwaddr l = 4;
3381 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003382 MemTxResult r;
Paolo Bonzini845b6212015-03-23 11:45:53 +01003383 uint8_t dirty_log_mask;
Jan Kiszka4840f102015-06-18 18:47:22 +02003384 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003385
Paolo Bonzini41063e12015-03-18 14:21:43 +01003386 rcu_read_lock();
Edgar E. Iglesias2198a122013-11-28 10:13:41 +01003387 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003388 true);
3389 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003390 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003391
Peter Maydell50013112015-04-26 16:49:24 +01003392 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003393 } else {
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003394 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
bellard8df1cd02005-01-28 22:37:22 +00003395 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003396
Paolo Bonzini845b6212015-03-23 11:45:53 +01003397 dirty_log_mask = memory_region_get_dirty_log_mask(mr);
3398 dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003399 cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
3400 4, dirty_log_mask);
Peter Maydell50013112015-04-26 16:49:24 +01003401 r = MEMTX_OK;
3402 }
3403 if (result) {
3404 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003405 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003406 if (release_lock) {
3407 qemu_mutex_unlock_iothread();
3408 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003409 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003410}
3411
Peter Maydell50013112015-04-26 16:49:24 +01003412void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3413{
3414 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3415}
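/* Sketch of the intended use: target MMU emulation setting the
 * accessed/dirty bits in a guest PTE without flagging the backing page
 * as modified (PG_ACCESSED_MASK is x86-specific, shown for illustration):
 *
 *     pte |= PG_ACCESSED_MASK;
 *     stl_phys_notdirty(cs->as, pte_addr, pte);
 *
 * A plain stl_phys() here would dirty the page and could trigger
 * needless TB invalidation when page tables share pages with code.
 */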
3416
/* warning: addr must be aligned */
static inline void address_space_stl_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 4);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stl_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

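/*
 * Usage sketch (illustrative; 'dev_as' and 'REG_ADDR' are hypothetical
 * names): a device model storing a 32-bit little-endian value and
 * checking the transaction result explicitly:
 *
 *     MemTxResult res;
 *     address_space_stl_le(dev_as, REG_ADDR, 0x12345678,
 *                          MEMTXATTRS_UNSPECIFIED, &res);
 *     if (res != MEMTX_OK) {
 *         ...handle the failed bus transaction...
 *     }
 *
 * For RAM the value is byte-swapped as needed and stored in place; for
 * MMIO the write is dispatched to the region's callback, taking the
 * iothread lock if it is not already held.
 */
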
void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* XXX: optimize */
void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t v = val;
    MemTxResult r;

    r = address_space_rw(as, addr, attrs, &v, 1, 1);
    if (result) {
        *result = r;
    }
}

void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

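/*
 * Sketch (illustrative): byte stores need no endianness handling, so a
 * single helper built on address_space_rw() suffices:
 *
 *     address_space_stb(as, addr, 0xff, MEMTXATTRS_UNSPECIFIED, NULL);
 *
 * The final two arguments of address_space_rw() above are the length
 * (1 byte) and the is_write flag.
 */
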
/* warning: addr must be aligned */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* XXX: optimize */
void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = tswap64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_le64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    MemTxResult r;
    val = cpu_to_be64(val);
    r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
    if (result) {
        *result = r;
    }
}

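/*
 * Sketch (illustrative): the 64-bit stores convert the value to the
 * requested byte order first and then issue a plain 8-byte write.
 * To store a value the guest expects in big-endian format regardless
 * of host byte order:
 *
 *     address_space_stq_be(as, addr, UINT64_C(0x0102030405060708),
 *                          MEMTXATTRS_UNSPECIFIED, NULL);
 *
 * cpu_to_be64() is a no-op on big-endian hosts and a byte swap on
 * little-endian ones; tswap64() swaps relative to the target's native
 * byte order instead.
 */
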
void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        int asidx;
        MemTxAttrs attrs;

        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
        asidx = cpu_asidx_from_attrs(cpu, attrs);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1) {
            return -1;
        }
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write) {
            cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
                                          phys_addr, buf, l);
        } else {
            address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
                             MEMTXATTRS_UNSPECIFIED,
                             buf, l, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

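/*
 * Sketch (illustrative): this is the kind of helper a gdbstub-style
 * debugger path uses, since it translates each guest-virtual page
 * before accessing it.  Reading 16 bytes at a hypothetical virtual
 * address 'vaddr':
 *
 *     uint8_t buf[16];
 *     if (cpu_memory_rw_debug(cpu, vaddr, buf, sizeof(buf), 0) < 0) {
 *         ...address not mapped in the guest page tables...
 *     }
 *
 * Writes go through cpu_physical_memory_write_rom(), so inserting a
 * software breakpoint works even in ROM-backed pages.
 */
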
/*
 * Allows code that needs to deal with migration bitmaps etc to still be built
 * target independent.
 */
size_t qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}

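/*
 * Sketch (illustrative): target-independent code such as migration can
 * recover the target page size without including target headers:
 *
 *     size_t page_size = (size_t)1 << qemu_target_page_bits();
 *     size_t pages = (ram_bytes + page_size - 1) / page_size;
 *
 * 'ram_bytes' is a hypothetical byte count being converted into a page
 * count for sizing a dirty bitmap.
 */
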
#endif

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

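/*
 * Sketch (illustrative, assuming the virtio core's device_endian
 * enum): a caller can use this to pick a default device endianness,
 * along the lines of:
 *
 *     vdev->device_endian = target_words_bigendian()
 *                           ? VIRTIO_DEVICE_ENDIAN_BIG
 *                           : VIRTIO_DEVICE_ENDIAN_LITTLE;
 */
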
#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}

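/*
 * Sketch (illustrative): memory-dump code can use this predicate to
 * skip device regions, which must not be read as if they were RAM:
 *
 *     if (!cpu_physical_memory_is_io(paddr)) {
 *         cpu_physical_memory_read(paddr, buf, len);
 *     }
 *
 * 'paddr', 'buf' and 'len' are hypothetical locals of the caller.
 */
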
int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);
        if (ret) {
            break;
        }
    }
    rcu_read_unlock();
    return ret;
}
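/*
 * Sketch (illustrative): the callback is invoked once per RAM block
 * under the RCU read lock; returning non-zero stops the walk and is
 * propagated to the caller.  A hypothetical callback that sums the
 * used length of all blocks:
 *
 *     static int sum_ram(const char *block_name, void *host_addr,
 *                        ram_addr_t offset, ram_addr_t length,
 *                        void *opaque)
 *     {
 *         *(uint64_t *)opaque += length;
 *         return 0;
 *     }
 *
 *     uint64_t total = 0;
 *     qemu_ram_foreach_block(sum_ram, &total);
 */
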
#endif