/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#ifndef _WIN32
#endif

#include "qemu/cutils.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "hw/qdev-core.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#include "hw/xen/xen.h"
#endif
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#if defined(CONFIG_USER_ONLY)
#include "qemu.h"
#else /* !CONFIG_USER_ONLY */
#include "hw/hw.h"
#include "exec/memory.h"
#include "exec/ioport.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "qemu/main-loop.h"
#include "translate-all.h"
#include "sysemu/replay.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "exec/log.h"

#include "migration/vmstate.h"

#include "qemu/range.h"
#ifndef _WIN32
#include "qemu/mmap-alloc.h"
#endif

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

#ifdef TARGET_PAGE_BITS_VARY
int target_page_bits;
bool target_page_bits_decided;
#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
__thread CPUState *current_cpu;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

bool set_preferred_target_page_bits(int bits)
{
    /* The target page size is the lowest common denominator for all
     * the CPUs in the system, so we can only make it smaller, never
     * larger. And we can't make it smaller once we've committed to
     * a particular size.
     */
#ifdef TARGET_PAGE_BITS_VARY
    assert(bits >= TARGET_PAGE_BITS_MIN);
    if (target_page_bits == 0 || target_page_bits > bits) {
        if (target_page_bits_decided) {
            return false;
        }
        target_page_bits = bits;
    }
#endif
    return true;
}
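
/* Usage sketch (illustrative, not a call made in this file): a target
 * whose CPUs support, say, 1 KiB pages would call
 * set_preferred_target_page_bits(10) from its cpu realize code.  The
 * call only fails (returns false) when a strictly smaller page size is
 * requested after finalize_target_page_bits() has already committed
 * the current one.
 */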

#if !defined(CONFIG_USER_ONLY)

static void finalize_target_page_bits(void)
{
#ifdef TARGET_PAGE_BITS_VARY
    if (target_page_bits == 0) {
        target_page_bits = TARGET_PAGE_BITS_MIN;
    }
    target_page_bits_decided = true;
#endif
}

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits to skip to the next level (in units of L2_SIZE).
     * 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)
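
/* Worked example: with a 4 KiB target page (TARGET_PAGE_BITS == 12) this
 * expands to ((64 - 12 - 1) / 9) + 1 = 6 levels, each level consuming
 * P_L2_BITS == 9 bits of the page index, enough to cover the full
 * 64-bit physical address space.
 */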

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    MemoryRegionSection *mru_section;
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;

/**
 * CPUAddressSpace: all the information a CPU needs about an AddressSpace
 * @cpu: the CPU whose AddressSpace this is
 * @as: the AddressSpace itself
 * @memory_dispatch: its dispatch pointer (cached, RCU protected)
 * @tcg_as_listener: listener for tracking changes to the AddressSpace
 */
struct CPUAddressSpace {
    CPUState *cpu;
    AddressSpace *as;
    struct AddressSpaceDispatch *memory_dispatch;
    MemoryListener tcg_as_listener;
};

#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    static unsigned alloc_hint = 16;
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, alloc_hint);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
        alloc_hint = map->nodes_nb_alloc;
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map, bool leaf)
{
    unsigned i;
    uint32_t ret;
    PhysPageEntry e;
    PhysPageEntry *p;

    ret = map->nodes_nb++;
    p = map->nodes[ret];
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);

    e.skip = leaf ? 0 : 1;
    e.ptr = leaf ? PHYS_SECTION_UNASSIGNED : PHYS_MAP_NODE_NIL;
    for (i = 0; i < P_L2_SIZE; ++i) {
        memcpy(&p[i], &e, sizeof(e));
    }
    return ret;
}

static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map, level == 0);
    }
    p = map->nodes[lp->ptr];
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}
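
/* Compaction example (illustrative): a chain of intermediate nodes that
 * each have exactly one populated child collapses into the topmost
 * entry, whose skip field accumulates the skips of the removed links.
 * A later phys_page_find() walk then crosses the whole chain with a
 * single node dereference instead of one per level, provided the
 * summed skip still passes the width check above.
 */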

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes);
    }
}

static inline bool section_covers_addr(const MemoryRegionSection *section,
                                       hwaddr addr)
{
    /* Memory topology clips a memory region to [0, 2^64); size.hi > 0 means
     * the section must cover the entire address space.
     */
    return int128_gethi(section->size) ||
           range_covers_byte(section->offset_within_address_space,
                             int128_getlo(section->size), addr);
}

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (section_covers_addr(&sections[lp.ptr], addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section = atomic_read(&d->mru_section);
    subpage_t *subpage;
    bool update;

    if (section && section != &d->map.sections[PHYS_SECTION_UNASSIGNED] &&
        section_covers_addr(section, addr)) {
        update = false;
    } else {
        section = phys_page_find(d->phys_map, addr, d->map.nodes,
                                 d->map.sections);
        update = true;
    }
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    if (update) {
        atomic_set(&d->mru_section, section);
    }
    return section;
}
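
/* Design note: mru_section is a single-entry cache in front of the
 * radix-tree walk.  Back-to-back lookups that land in the same section
 * (the common case for a CPU working through one RAM region) skip
 * phys_page_find() entirely; atomic_read()/atomic_set() keep the cache
 * consistent for concurrent readers inside their RCU critical sections.
 */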

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    MemoryRegion *mr;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    mr = section->mr;

    /* MMIO registers can be expected to perform full-width accesses based only
     * on their address, without considering adjacent registers that could
     * decode to completely different MemoryRegions.  When such registers
     * exist (e.g. I/O ports 0xcf8 and 0xcf9 on most PC chipsets), MMIO
     * regions overlap wildly.  For this reason we cannot clamp the accesses
     * here.
     *
     * If the length is small (as is the case for address_space_ldl/stl),
     * everything works fine.  If the incoming length is large, however,
     * the caller really has to do the clamping through memory_access_size.
     */
    if (memory_region_is_ram(mr)) {
        diff = int128_sub(section->size, int128_make64(addr));
        *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    }
    return section;
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}
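
/* Worked example for the IOMMU step above (illustrative numbers): for a
 * 4 KiB translation, iotlb.addr_mask is 0xfff, so the output address
 * keeps the low 12 bits of the input (addr & addr_mask) and takes the
 * high bits from iotlb.translated_addr.  The *plen clamp then limits
 * the access to the rest of that 4 KiB page, and the loop repeats in
 * iotlb.target_as in case the result is itself behind another IOMMU.
 */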

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    AddressSpaceDispatch *d = atomic_rcu_read(&cpu->cpu_ases[asidx].memory_dispatch);

    section = address_space_translate_internal(d, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_exception_index_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

static bool cpu_common_crash_occurred_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return cpu->crash_occurred;
}

static const VMStateDescription vmstate_cpu_common_crash_occurred = {
    .name = "cpu_common/crash_occurred",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = cpu_common_crash_occurred_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(crash_occurred, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_cpu_common_exception_index,
        &vmstate_cpu_common_crash_occurred,
        NULL
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx)
{
    CPUAddressSpace *newas;

    /* Target code should have set num_ases before calling us */
    assert(asidx < cpu->num_ases);

    if (asidx == 0) {
        /* address space 0 gets the convenience alias */
        cpu->as = as;
    }

    /* KVM cannot currently support multiple address spaces. */
    assert(asidx == 0 || !kvm_enabled());

    if (!cpu->cpu_ases) {
        cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases);
    }

    newas = &cpu->cpu_ases[asidx];
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
}

AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx)
{
    /* Return the AddressSpace corresponding to the specified index */
    return cpu->cpu_ases[asidx].as;
}
#endif

void cpu_exec_unrealizefn(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    cpu_list_remove(cpu);

    if (cc->vmsd != NULL) {
        vmstate_unregister(NULL, cc->vmsd, cpu);
    }
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_unregister(NULL, &vmstate_cpu_common, cpu);
    }
}

void cpu_exec_initfn(CPUState *cpu)
{
    cpu->as = NULL;
    cpu->num_ases = 0;

#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();

    /* This is a softmmu CPU object, so create a property for it
     * so users can wire up its memory. (This can't go in qom/cpu.c
     * because that file is compiled only once for both user-mode
     * and system builds.) The default if no link is set up is to use
     * the system address space.
     */
    object_property_add_link(OBJECT(cpu), "memory", TYPE_MEMORY_REGION,
                             (Object **)&cpu->memory,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    cpu->memory = system_memory;
    object_ref(OBJECT(cpu->memory));
#endif
}
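
/* Illustrative sketch (not code from this file): a board that wants a
 * CPU to see a private address space instead of system_memory could set
 * the link created above before realizing the CPU, e.g.
 *
 *     object_property_set_link(OBJECT(cpu), OBJECT(my_mr), "memory",
 *                              &error_abort);
 *
 * where my_mr is a hypothetical board-owned MemoryRegion.
 */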

void cpu_exec_realizefn(CPUState *cpu, Error **errp)
{
    CPUClass *cc ATTRIBUTE_UNUSED = CPU_GET_CLASS(cpu);

    cpu_list_add(cpu);

#ifndef CONFIG_USER_ONLY
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
    }
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
    }
#endif
}

static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    /* Flush the whole TB as this will not have race conditions
     * even if we don't have proper locking yet.
     * Ideally we would just invalidate the TBs for the
     * specified PC.
     */
    tb_flush(cpu);
}

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)

{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}
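
/* Boundary example: a watchpoint ending at the very top of the address
 * space, e.g. wp->vaddr == (vaddr)-4 with wp->len == 4, gives
 * wpend == (vaddr)-1, whereas computing wp->vaddr + wp->len directly
 * would wrap to zero.  Comparing inclusive endpoints therefore stays
 * correct at the boundary.
 */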

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(cpu);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_separate()) {
        qemu_log_lock();
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_unlock();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    replay_finish();
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        return block;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *     rcu_read_lock()
     *     read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *     rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    CPUState *cpu;
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    CPU_FOREACH(cpu) {
        tlb_reset_dirty(cpu, start1, length);
    }
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block. */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client)
{
    DirtyMemoryBlocks *blocks;
    unsigned long end, page;
    bool dirty = false;

    if (length == 0) {
        return false;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    rcu_read_lock();

    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);

    while (page < end) {
        unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        unsigned long num = MIN(end - page, DIRTY_MEMORY_BLOCK_SIZE - offset);

        dirty |= bitmap_test_and_clear_atomic(blocks->blocks[idx],
                                              offset, num);
        page += num;
    }

    rcu_read_unlock();

    if (dirty && tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }

    return dirty;
}

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        AddressSpaceDispatch *d;

        d = atomic_rcu_read(&section->address_space->dispatch);
        iotlb = section - d->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;
/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}
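
/* Encoding note: because section numbers stay below TARGET_PAGE_SIZE
 * (the assert above) and the RAM pointer half of an iotlb entry is
 * page-aligned, memory_region_section_get_iotlb() can OR a section
 * number such as PHYS_SECTION_NOTDIRTY into the low bits and both
 * halves can later be separated unambiguously.
 */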
1104
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001105static void phys_section_destroy(MemoryRegion *mr)
1106{
Don Slutz55b4e802015-11-30 17:11:04 -05001107 bool have_sub_page = mr->subpage;
1108
Paolo Bonzinidfde4e62013-05-06 10:46:11 +02001109 memory_region_unref(mr);
1110
Don Slutz55b4e802015-11-30 17:11:04 -05001111 if (have_sub_page) {
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001112 subpage_t *subpage = container_of(mr, subpage_t, iomem);
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07001113 object_unref(OBJECT(&subpage->iomem));
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001114 g_free(subpage);
1115 }
1116}
1117
Paolo Bonzini60926662013-05-29 12:30:26 +02001118static void phys_sections_free(PhysPageMap *map)
Avi Kivity5312bd82012-02-12 18:32:55 +02001119{
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001120 while (map->sections_nb > 0) {
1121 MemoryRegionSection *section = &map->sections[--map->sections_nb];
Paolo Bonzini058bc4b2013-06-25 09:30:48 +02001122 phys_section_destroy(section->mr);
1123 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02001124 g_free(map->sections);
1125 g_free(map->nodes);
Avi Kivity5312bd82012-02-12 18:32:55 +02001126}
1127
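/* Register a section that covers only part of a target page: route it
 * through a subpage_t container, creating the container for this page
 * on first use.
 */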
Avi Kivityac1970f2012-10-03 16:22:53 +02001128static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001129{
1130 subpage_t *subpage;
Avi Kivitya8170e52012-10-23 12:30:10 +02001131 hwaddr base = section->offset_within_address_space
Avi Kivity0f0cb162012-02-13 17:14:32 +02001132 & TARGET_PAGE_MASK;
Michael S. Tsirkin97115a82013-11-13 20:08:19 +02001133 MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001134 d->map.nodes, d->map.sections);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001135 MemoryRegionSection subsection = {
1136 .offset_within_address_space = base,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001137 .size = int128_make64(TARGET_PAGE_SIZE),
Avi Kivity0f0cb162012-02-13 17:14:32 +02001138 };
Avi Kivitya8170e52012-10-23 12:30:10 +02001139 hwaddr start, end;
Avi Kivity0f0cb162012-02-13 17:14:32 +02001140
Avi Kivityf3705d52012-03-08 16:16:34 +02001141 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001142
Avi Kivityf3705d52012-03-08 16:16:34 +02001143 if (!(existing->mr->subpage)) {
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001144 subpage = subpage_init(d->as, base);
Edgar E. Iglesias3be91e82013-11-07 18:42:51 +01001145 subsection.address_space = d->as;
Avi Kivity0f0cb162012-02-13 17:14:32 +02001146 subsection.mr = &subpage->iomem;
Avi Kivityac1970f2012-10-03 16:22:53 +02001147 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001148 phys_section_add(&d->map, &subsection));
Avi Kivity0f0cb162012-02-13 17:14:32 +02001149 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02001150 subpage = container_of(existing->mr, subpage_t, iomem);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001151 }
1152 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001153 end = start + int128_get64(section->size) - 1;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001154 subpage_register(subpage, start, end,
1155 phys_section_add(&d->map, section));
Avi Kivity0f0cb162012-02-13 17:14:32 +02001156}
1157
1158
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001159static void register_multipage(AddressSpaceDispatch *d,
1160 MemoryRegionSection *section)
bellard33417e72003-08-10 21:47:01 +00001161{
Avi Kivitya8170e52012-10-23 12:30:10 +02001162 hwaddr start_addr = section->offset_within_address_space;
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02001163 uint16_t section_index = phys_section_add(&d->map, section);
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001164 uint64_t num_pages = int128_get64(int128_rshift(section->size,
1165 TARGET_PAGE_BITS));
Avi Kivitydd811242012-01-02 12:17:03 +02001166
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001167 assert(num_pages);
1168 phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
bellard33417e72003-08-10 21:47:01 +00001169}
1170
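/* Add a section to the dispatch under construction: split it into an
 * unaligned head, a run of whole target pages, and an unaligned tail,
 * registering subpages for the partial pages and a multipage mapping
 * for the aligned middle.
 */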
Avi Kivityac1970f2012-10-03 16:22:53 +02001171static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
Avi Kivity0f0cb162012-02-13 17:14:32 +02001172{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02001173 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini00752702013-05-29 12:13:54 +02001174 AddressSpaceDispatch *d = as->next_dispatch;
Paolo Bonzini99b9cc02013-05-27 13:18:01 +02001175 MemoryRegionSection now = *section, remain = *section;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001176 Int128 page_size = int128_make64(TARGET_PAGE_SIZE);
Avi Kivity0f0cb162012-02-13 17:14:32 +02001177
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001178 if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
1179 uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
1180 - now.offset_within_address_space;
1181
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001182 now.size = int128_min(int128_make64(left), now.size);
Avi Kivityac1970f2012-10-03 16:22:53 +02001183 register_subpage(d, &now);
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001184 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001185 now.size = int128_zero();
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001186 }
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001187 while (int128_ne(remain.size, now.size)) {
1188 remain.size = int128_sub(remain.size, now.size);
1189 remain.offset_within_address_space += int128_get64(now.size);
1190 remain.offset_within_region += int128_get64(now.size);
Tyler Hall69b67642012-07-25 18:45:04 -04001191 now = remain;
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001192 if (int128_lt(remain.size, page_size)) {
Paolo Bonzini733d5ef2013-05-27 10:47:10 +02001193 register_subpage(d, &now);
Hu Tao88266242013-08-29 18:21:16 +08001194 } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001195 now.size = page_size;
Avi Kivityac1970f2012-10-03 16:22:53 +02001196 register_subpage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001197 } else {
Paolo Bonzini052e87b2013-05-27 10:08:27 +02001198 now.size = int128_and(now.size, int128_neg(page_size));
Avi Kivityac1970f2012-10-03 16:22:53 +02001199 register_multipage(d, &now);
Tyler Hall69b67642012-07-25 18:45:04 -04001200 }
Avi Kivity0f0cb162012-02-13 17:14:32 +02001201 }
1202}
1203
Sheng Yang62a27442010-01-26 19:21:16 +08001204void qemu_flush_coalesced_mmio_buffer(void)
1205{
1206 if (kvm_enabled())
1207 kvm_flush_coalesced_mmio_buffer();
1208}
1209
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001210void qemu_mutex_lock_ramlist(void)
1211{
1212 qemu_mutex_lock(&ram_list.mutex);
1213}
1214
1215void qemu_mutex_unlock_ramlist(void)
1216{
1217 qemu_mutex_unlock(&ram_list.mutex);
1218}
1219
Markus Armbrustere1e84ba2013-07-31 15:11:10 +02001220#ifdef __linux__
Haozhong Zhangd6af99c2016-10-27 12:22:58 +08001221static int64_t get_file_size(int fd)
1222{
1223 int64_t size = lseek(fd, 0, SEEK_END);
1224 if (size < 0) {
1225 return -errno;
1226 }
1227 return size;
1228}
1229
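/* Allocate guest RAM backed by a file (e.g. hugetlbfs via -mem-path):
 * open or create the backing file, check its size against the requested
 * size, mmap it, and preallocate if requested.
 */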
Alex Williamson04b16652010-07-02 11:13:17 -06001230static void *file_ram_alloc(RAMBlock *block,
1231 ram_addr_t memory,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001232 const char *path,
1233 Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001234{
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001235 bool unlink_on_error = false;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001236 char *filename;
Peter Feiner8ca761f2013-03-04 13:54:25 -05001237 char *sanitized_name;
1238 char *c;
Igor Mammedov056b68a2016-07-20 11:54:03 +02001239 void *area = MAP_FAILED;
Paolo Bonzini5c3ece72016-03-17 15:53:13 +01001240 int fd = -1;
Haozhong Zhangd6af99c2016-10-27 12:22:58 +08001241 int64_t file_size;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001242
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001243 if (kvm_enabled() && !kvm_has_sync_mmu()) {
1244 error_setg(errp,
1245 "host lacks kvm mmu notifiers, -mem-path unsupported");
1246 return NULL;
1247 }
1248
1249 for (;;) {
1250 fd = open(path, O_RDWR);
1251 if (fd >= 0) {
1252 /* @path names an existing file, use it */
1253 break;
1254 }
1255 if (errno == ENOENT) {
1256 /* @path names a file that doesn't exist, create it */
1257 fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
1258 if (fd >= 0) {
1259 unlink_on_error = true;
1260 break;
1261 }
1262 } else if (errno == EISDIR) {
1263 /* @path names a directory, create a file there */
1264 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
1265 sanitized_name = g_strdup(memory_region_name(block->mr));
1266 for (c = sanitized_name; *c != '\0'; c++) {
1267 if (*c == '/') {
1268 *c = '_';
1269 }
1270 }
1271
1272 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1273 sanitized_name);
1274 g_free(sanitized_name);
1275
1276 fd = mkstemp(filename);
1277 if (fd >= 0) {
1278 unlink(filename);
1279 g_free(filename);
1280 break;
1281 }
1282 g_free(filename);
1283 }
1284 if (errno != EEXIST && errno != EINTR) {
1285 error_setg_errno(errp, errno,
1286 "can't open backing store %s for guest RAM",
1287 path);
1288 goto error;
1289 }
1290 /*
1291 * Try again on EINTR and EEXIST. The latter happens when
1292 * something else creates the file between our two open().
1293 */
1294 }
1295
Dr. David Alan Gilbert863e9622016-09-29 20:09:37 +01001296 block->page_size = qemu_fd_getpagesize(fd);
Haozhong Zhang83606682016-10-24 20:49:37 +08001297 block->mr->align = block->page_size;
1298#if defined(__s390x__)
1299 if (kvm_enabled()) {
1300 block->mr->align = MAX(block->mr->align, QEMU_VMALLOC_ALIGN);
1301 }
1302#endif
Marcelo Tosattic9027602010-03-01 20:25:08 -03001303
Haozhong Zhangd6af99c2016-10-27 12:22:58 +08001304 file_size = get_file_size(fd);
1305
Dr. David Alan Gilbert863e9622016-09-29 20:09:37 +01001306 if (memory < block->page_size) {
Hu Tao557529d2014-09-09 13:28:00 +08001307 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
Dr. David Alan Gilbert863e9622016-09-29 20:09:37 +01001308 "or larger than page size 0x%zx",
1309 memory, block->page_size);
Hu Tao557529d2014-09-09 13:28:00 +08001310 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001311 }
1312
Haozhong Zhang1775f112016-11-02 09:05:51 +08001313 if (file_size > 0 && file_size < memory) {
1314 error_setg(errp, "backing store %s size 0x%" PRIx64
1315 " does not match 'size' option 0x" RAM_ADDR_FMT,
1316 path, file_size, memory);
1317 goto error;
1318 }
1319
Dr. David Alan Gilbert863e9622016-09-29 20:09:37 +01001320 memory = ROUND_UP(memory, block->page_size);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001321
1322 /*
1323 * ftruncate is not supported by hugetlbfs in older
1324 * hosts, so don't bother bailing out on errors.
1325 * If anything goes wrong with it under other filesystems,
1326 * mmap will fail.
Haozhong Zhangd6af99c2016-10-27 12:22:58 +08001327 *
1328 * Do not truncate the non-empty backend file to avoid corrupting
1329 * the existing data in the file. Disabling shrinking is not
1330 * enough. For example, the current vNVDIMM implementation stores
1331 * the guest NVDIMM labels at the end of the backend file. If the
1332 * backend file is later extended, QEMU will not be able to find
1333 * those labels. Therefore, extending the non-empty backend file
1334 * is disabled as well.
Marcelo Tosattic9027602010-03-01 20:25:08 -03001335 */
Haozhong Zhangd6af99c2016-10-27 12:22:58 +08001336 if (!file_size && ftruncate(fd, memory)) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001337 perror("ftruncate");
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001338 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001339
Dominik Dingeld2f39ad2016-04-25 13:55:38 +02001340 area = qemu_ram_mmap(fd, memory, block->mr->align,
1341 block->flags & RAM_SHARED);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001342 if (area == MAP_FAILED) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001343 error_setg_errno(errp, errno,
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001344 "unable to map backing store for guest RAM");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001345 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001346 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001347
1348 if (mem_prealloc) {
Igor Mammedov056b68a2016-07-20 11:54:03 +02001349 os_mem_prealloc(fd, area, memory, errp);
1350 if (errp && *errp) {
1351 goto error;
1352 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001353 }
1354
Alex Williamson04b16652010-07-02 11:13:17 -06001355 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001356 return area;
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001357
1358error:
Igor Mammedov056b68a2016-07-20 11:54:03 +02001359 if (area != MAP_FAILED) {
1360 qemu_ram_munmap(area, memory);
1361 }
Markus Armbrusterfd97fd42016-03-07 20:25:13 +01001362 if (unlink_on_error) {
1363 unlink(path);
1364 }
Paolo Bonzini5c3ece72016-03-17 15:53:13 +01001365 if (fd != -1) {
1366 close(fd);
1367 }
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001368 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001369}
1370#endif
1371
Mike Day0dc3f442013-09-05 14:41:35 -04001372/* Called with the ramlist lock held. */
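/* Find the smallest gap between existing blocks that can hold @size bytes. */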
Alex Williamsond17b5282010-06-25 11:08:38 -06001373static ram_addr_t find_ram_offset(ram_addr_t size)
1374{
Alex Williamson04b16652010-07-02 11:13:17 -06001375 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001376 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001377
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001378 assert(size != 0); /* it would hand out same offset multiple times */
1379
Mike Day0dc3f442013-09-05 14:41:35 -04001380 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
Alex Williamson04b16652010-07-02 11:13:17 -06001381 return 0;
Mike Day0d53d9f2015-01-21 13:45:24 +01001382 }
Alex Williamson04b16652010-07-02 11:13:17 -06001383
Mike Day0dc3f442013-09-05 14:41:35 -04001384 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001385 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001386
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001387 end = block->offset + block->max_length;
Alex Williamson04b16652010-07-02 11:13:17 -06001388
Mike Day0dc3f442013-09-05 14:41:35 -04001389 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001390 if (next_block->offset >= end) {
1391 next = MIN(next, next_block->offset);
1392 }
1393 }
1394 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001395 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001396 mingap = next - end;
1397 }
1398 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001399
1400 if (offset == RAM_ADDR_MAX) {
1401 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1402 (uint64_t)size);
1403 abort();
1404 }
1405
Alex Williamson04b16652010-07-02 11:13:17 -06001406 return offset;
1407}
1408
Juan Quintela652d7ec2012-07-20 10:37:54 +02001409ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001410{
Alex Williamsond17b5282010-06-25 11:08:38 -06001411 RAMBlock *block;
1412 ram_addr_t last = 0;
1413
Mike Day0dc3f442013-09-05 14:41:35 -04001414 rcu_read_lock();
1415 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001416 last = MAX(last, block->offset + block->max_length);
Mike Day0d53d9f2015-01-21 13:45:24 +01001417 }
Mike Day0dc3f442013-09-05 14:41:35 -04001418 rcu_read_unlock();
Alex Williamsond17b5282010-06-25 11:08:38 -06001419 return last;
1420}
1421
Jason Baronddb97f12012-08-02 15:44:16 -04001422static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1423{
1424 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001425
1426 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
Marcel Apfelbaum47c8ca52015-02-04 17:43:54 +02001427 if (!machine_dump_guest_core(current_machine)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001428 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1429 if (ret) {
1430 perror("qemu_madvise");
1431 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1432 "but dump_guest_core=off specified\n");
1433 }
1434 }
1435}
1436
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001437const char *qemu_ram_get_idstr(RAMBlock *rb)
1438{
1439 return rb->idstr;
1440}
1441
Mike Dayae3a7042013-09-05 14:41:35 -04001442/* Called with iothread lock held. */
Gongleifa53a0e2016-05-10 10:04:59 +08001443void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
Hu Tao20cfe882014-04-02 15:13:26 +08001444{
Gongleifa53a0e2016-05-10 10:04:59 +08001445 RAMBlock *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001446
Avi Kivityc5705a72011-12-20 15:59:12 +02001447 assert(new_block);
1448 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001449
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001450 if (dev) {
1451 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001452 if (id) {
1453 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001454 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001455 }
1456 }
1457 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1458
Gongleiab0a9952016-05-10 10:05:00 +08001459 rcu_read_lock();
Mike Day0dc3f442013-09-05 14:41:35 -04001460 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Gongleifa53a0e2016-05-10 10:04:59 +08001461 if (block != new_block &&
1462 !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001463 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1464 new_block->idstr);
1465 abort();
1466 }
1467 }
Mike Day0dc3f442013-09-05 14:41:35 -04001468 rcu_read_unlock();
Avi Kivityc5705a72011-12-20 15:59:12 +02001469}
1470
Mike Dayae3a7042013-09-05 14:41:35 -04001471/* Called with iothread lock held. */
Gongleifa53a0e2016-05-10 10:04:59 +08001472void qemu_ram_unset_idstr(RAMBlock *block)
Hu Tao20cfe882014-04-02 15:13:26 +08001473{
Mike Dayae3a7042013-09-05 14:41:35 -04001474 /* FIXME: arch_init.c assumes that this is not called throughout
1475 * migration. Ignore the problem since hot-unplug during migration
1476 * does not work anyway.
1477 */
Hu Tao20cfe882014-04-02 15:13:26 +08001478 if (block) {
1479 memset(block->idstr, 0, sizeof(block->idstr));
1480 }
1481}
1482
Dr. David Alan Gilbert863e9622016-09-29 20:09:37 +01001483size_t qemu_ram_pagesize(RAMBlock *rb)
1484{
1485 return rb->page_size;
1486}
1487
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001488static int memory_try_enable_merging(void *addr, size_t len)
1489{
Marcel Apfelbaum75cc7f02015-02-04 17:43:55 +02001490 if (!machine_mem_merge(current_machine)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001491 /* disabled by the user */
1492 return 0;
1493 }
1494
1495 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1496}
1497
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001498/* Only legal before guest might have detected the memory size: e.g. on
1499 * incoming migration, or right after reset.
1500 *
1501 * As the memory core doesn't know how the memory is accessed, it is up to
1502 * the resize callback to update device state and/or add assertions to detect
1503 * misuse, if necessary.
1504 */
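/*
 * Illustrative sketch (hypothetical caller, not from this file),
 * assuming @block was created with the RAM_RESIZEABLE flag:
 *
 *     Error *err = NULL;
 *     if (qemu_ram_resize(block, new_size, &err) < 0) {
 *         error_report_err(err);
 *     }
 */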
Gongleifa53a0e2016-05-10 10:04:59 +08001505int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001506{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001507 assert(block);
1508
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001509 newsize = HOST_PAGE_ALIGN(newsize);
Michael S. Tsirkin129ddaf2015-02-17 10:15:30 +01001510
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001511 if (block->used_length == newsize) {
1512 return 0;
1513 }
1514
1515 if (!(block->flags & RAM_RESIZEABLE)) {
1516 error_setg_errno(errp, EINVAL,
1517 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1518 " in != 0x" RAM_ADDR_FMT, block->idstr,
1519 newsize, block->used_length);
1520 return -EINVAL;
1521 }
1522
1523 if (block->max_length < newsize) {
1524 error_setg_errno(errp, EINVAL,
1525 "Length too large: %s: 0x" RAM_ADDR_FMT
1526 " > 0x" RAM_ADDR_FMT, block->idstr,
1527 newsize, block->max_length);
1528 return -EINVAL;
1529 }
1530
1531 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1532 block->used_length = newsize;
Paolo Bonzini58d27072015-03-23 11:56:01 +01001533 cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
1534 DIRTY_CLIENTS_ALL);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001535 memory_region_set_size(block->mr, newsize);
1536 if (block->resized) {
1537 block->resized(block->idstr, newsize, block->host);
1538 }
1539 return 0;
1540}
1541
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001542/* Called with ram_list.mutex held */
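/* Grow the per-client dirty bitmaps to cover new_ram_size. The blocks
 * array is copied (existing bitmap blocks are shared with concurrent
 * readers, not duplicated), new bitmaps are appended, and the new array
 * is published with RCU.
 */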
1543static void dirty_memory_extend(ram_addr_t old_ram_size,
1544 ram_addr_t new_ram_size)
1545{
1546 ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
1547 DIRTY_MEMORY_BLOCK_SIZE);
1548 ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
1549 DIRTY_MEMORY_BLOCK_SIZE);
1550 int i;
1551
1552 /* Only need to extend if block count increased */
1553 if (new_num_blocks <= old_num_blocks) {
1554 return;
1555 }
1556
1557 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1558 DirtyMemoryBlocks *old_blocks;
1559 DirtyMemoryBlocks *new_blocks;
1560 int j;
1561
1562 old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
1563 new_blocks = g_malloc(sizeof(*new_blocks) +
1564 sizeof(new_blocks->blocks[0]) * new_num_blocks);
1565
1566 if (old_num_blocks) {
1567 memcpy(new_blocks->blocks, old_blocks->blocks,
1568 old_num_blocks * sizeof(old_blocks->blocks[0]));
1569 }
1570
1571 for (j = old_num_blocks; j < new_num_blocks; j++) {
1572 new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
1573 }
1574
1575 atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
1576
1577 if (old_blocks) {
1578 g_free_rcu(old_blocks, rcu);
1579 }
1580 }
1581}
1582
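/* Insert @new_block into the RAM block list: pick a free offset, allocate
 * host memory (or Xen-map it) when none was supplied, grow the dirty
 * bitmaps, and keep the list sorted from biggest to smallest block.
 */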
Fam Zheng528f46a2016-03-01 14:18:18 +08001583static void ram_block_add(RAMBlock *new_block, Error **errp)
Avi Kivityc5705a72011-12-20 15:59:12 +02001584{
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001585 RAMBlock *block;
Mike Day0d53d9f2015-01-21 13:45:24 +01001586 RAMBlock *last_block = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001587 ram_addr_t old_ram_size, new_ram_size;
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001588 Error *err = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001589
1590 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001591
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001592 qemu_mutex_lock_ramlist();
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001593 new_block->offset = find_ram_offset(new_block->max_length);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001594
1595 if (!new_block->host) {
1596 if (xen_enabled()) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001597 xen_ram_alloc(new_block->offset, new_block->max_length,
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001598 new_block->mr, &err);
1599 if (err) {
1600 error_propagate(errp, err);
1601 qemu_mutex_unlock_ramlist();
Paolo Bonzini39c350e2016-03-09 18:14:01 +01001602 return;
Markus Armbruster37aa7a02016-01-14 16:09:39 +01001603 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001604 } else {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001605 new_block->host = phys_mem_alloc(new_block->max_length,
Igor Mammedova2b257d2014-10-31 16:38:37 +00001606 &new_block->mr->align);
Markus Armbruster39228252013-07-31 15:11:11 +02001607 if (!new_block->host) {
Hu Taoef701d72014-09-09 13:27:54 +08001608 error_setg_errno(errp, errno,
1609 "cannot set up guest memory '%s'",
1610 memory_region_name(new_block->mr));
1611 qemu_mutex_unlock_ramlist();
Paolo Bonzini39c350e2016-03-09 18:14:01 +01001612 return;
Markus Armbruster39228252013-07-31 15:11:11 +02001613 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001614 memory_try_enable_merging(new_block->host, new_block->max_length);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001615 }
1616 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001617
Li Zhijiandd631692015-07-02 20:18:06 +08001618 new_ram_size = MAX(old_ram_size,
1619 (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
1620 if (new_ram_size > old_ram_size) {
1621 migration_bitmap_extend(old_ram_size, new_ram_size);
Stefan Hajnoczi5b82b702016-01-25 13:33:20 +00001622 dirty_memory_extend(old_ram_size, new_ram_size);
Li Zhijiandd631692015-07-02 20:18:06 +08001623 }
Mike Day0d53d9f2015-01-21 13:45:24 +01001624 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1625 * QLIST (which has an RCU-friendly variant) does not have insertion at
1626 * tail, so save the last element in last_block.
1627 */
Mike Day0dc3f442013-09-05 14:41:35 -04001628 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Mike Day0d53d9f2015-01-21 13:45:24 +01001629 last_block = block;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001630 if (block->max_length < new_block->max_length) {
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001631 break;
1632 }
1633 }
1634 if (block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001635 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001636 } else if (last_block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001637 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001638 } else { /* list is empty */
Mike Day0dc3f442013-09-05 14:41:35 -04001639 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001640 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001641 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001642
Mike Day0dc3f442013-09-05 14:41:35 -04001643 /* Write list before version */
1644 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001645 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001646 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001647
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001648 cpu_physical_memory_set_dirty_range(new_block->offset,
Paolo Bonzini58d27072015-03-23 11:56:01 +01001649 new_block->used_length,
1650 DIRTY_CLIENTS_ALL);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001651
Paolo Bonzinia904c912015-01-21 16:18:35 +01001652 if (new_block->host) {
1653 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1654 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
Cao jinc2cd6272016-09-12 14:34:56 +08001655 /* MADV_DONTFORK is also needed by KVM in the absence of a synchronous MMU */
Paolo Bonzinia904c912015-01-21 16:18:35 +01001656 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001657 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001658}
1659
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001660#ifdef __linux__
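/* Allocate a RAM block backed by a file at @mem_path (Linux only);
 * refused under Xen or when a non-anonymous phys_mem_alloc is installed.
 */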
Fam Zheng528f46a2016-03-01 14:18:18 +08001661RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
1662 bool share, const char *mem_path,
1663 Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001664{
1665 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001666 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001667
1668 if (xen_enabled()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001669 error_setg(errp, "-mem-path not supported with Xen");
Fam Zheng528f46a2016-03-01 14:18:18 +08001670 return NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001671 }
1672
1673 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1674 /*
1675 * file_ram_alloc() needs to allocate just like
1676 * phys_mem_alloc, but we haven't bothered to provide
1677 * a hook there.
1678 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001679 error_setg(errp,
1680 "-mem-path not supported with this accelerator");
Fam Zheng528f46a2016-03-01 14:18:18 +08001681 return NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001682 }
1683
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001684 size = HOST_PAGE_ALIGN(size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001685 new_block = g_malloc0(sizeof(*new_block));
1686 new_block->mr = mr;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001687 new_block->used_length = size;
1688 new_block->max_length = size;
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001689 new_block->flags = share ? RAM_SHARED : 0;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001690 new_block->host = file_ram_alloc(new_block, size,
1691 mem_path, errp);
1692 if (!new_block->host) {
1693 g_free(new_block);
Fam Zheng528f46a2016-03-01 14:18:18 +08001694 return NULL;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001695 }
1696
Fam Zheng528f46a2016-03-01 14:18:18 +08001697 ram_block_add(new_block, &local_err);
Hu Taoef701d72014-09-09 13:27:54 +08001698 if (local_err) {
1699 g_free(new_block);
1700 error_propagate(errp, local_err);
Fam Zheng528f46a2016-03-01 14:18:18 +08001701 return NULL;
Hu Taoef701d72014-09-09 13:27:54 +08001702 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001703 return new_block;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001704}
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001705#endif
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001706
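/* Common backend of the qemu_ram_alloc*() variants: build a RAMBlock for
 * caller-supplied memory (RAM_PREALLOC), anonymous memory, or a resizeable
 * region, and hand it to ram_block_add().
 */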
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001707static
Fam Zheng528f46a2016-03-01 14:18:18 +08001708RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1709 void (*resized)(const char*,
1710 uint64_t length,
1711 void *host),
1712 void *host, bool resizeable,
1713 MemoryRegion *mr, Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001714{
1715 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001716 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001717
Dr. David Alan Gilbert4ed023c2015-11-05 18:11:16 +00001718 size = HOST_PAGE_ALIGN(size);
1719 max_size = HOST_PAGE_ALIGN(max_size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001720 new_block = g_malloc0(sizeof(*new_block));
1721 new_block->mr = mr;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001722 new_block->resized = resized;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001723 new_block->used_length = size;
1724 new_block->max_length = max_size;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001725 assert(max_size >= size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001726 new_block->fd = -1;
Dr. David Alan Gilbert863e9622016-09-29 20:09:37 +01001727 new_block->page_size = getpagesize();
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001728 new_block->host = host;
1729 if (host) {
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001730 new_block->flags |= RAM_PREALLOC;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001731 }
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001732 if (resizeable) {
1733 new_block->flags |= RAM_RESIZEABLE;
1734 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001735 ram_block_add(new_block, &local_err);
Hu Taoef701d72014-09-09 13:27:54 +08001736 if (local_err) {
1737 g_free(new_block);
1738 error_propagate(errp, local_err);
Fam Zheng528f46a2016-03-01 14:18:18 +08001739 return NULL;
Hu Taoef701d72014-09-09 13:27:54 +08001740 }
Fam Zheng528f46a2016-03-01 14:18:18 +08001741 return new_block;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001742}
1743
Fam Zheng528f46a2016-03-01 14:18:18 +08001744RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001745 MemoryRegion *mr, Error **errp)
1746{
1747 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1748}
1749
Fam Zheng528f46a2016-03-01 14:18:18 +08001750RAMBlock *qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
pbrook94a6b542009-04-11 17:15:54 +00001751{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001752 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1753}
1754
Fam Zheng528f46a2016-03-01 14:18:18 +08001755RAMBlock *qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001756 void (*resized)(const char*,
1757 uint64_t length,
1758 void *host),
1759 MemoryRegion *mr, Error **errp)
1760{
1761 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
pbrook94a6b542009-04-11 17:15:54 +00001762}
bellarde9a1ab12007-02-08 23:08:38 +00001763
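/* RCU reclaim function used by qemu_ram_free(): release the host memory
 * according to how it was allocated (preallocated, Xen, file-backed or
 * anonymous) and free the RAMBlock itself.
 */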
Paolo Bonzini43771532013-09-09 17:58:40 +02001764static void reclaim_ramblock(RAMBlock *block)
1765{
1766 if (block->flags & RAM_PREALLOC) {
1767 ;
1768 } else if (xen_enabled()) {
1769 xen_invalidate_map_cache_entry(block->host);
1770#ifndef _WIN32
1771 } else if (block->fd >= 0) {
Eduardo Habkost2f3a2bb2015-11-06 20:11:21 -02001772 qemu_ram_munmap(block->host, block->max_length);
Paolo Bonzini43771532013-09-09 17:58:40 +02001773 close(block->fd);
1774#endif
1775 } else {
1776 qemu_anon_ram_free(block->host, block->max_length);
1777 }
1778 g_free(block);
1779}
1780
Fam Zhengf1060c52016-03-01 14:18:22 +08001781void qemu_ram_free(RAMBlock *block)
bellarde9a1ab12007-02-08 23:08:38 +00001782{
Marc-André Lureau85bc2a12016-03-29 13:20:51 +02001783 if (!block) {
1784 return;
1785 }
1786
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001787 qemu_mutex_lock_ramlist();
Fam Zhengf1060c52016-03-01 14:18:22 +08001788 QLIST_REMOVE_RCU(block, next);
1789 ram_list.mru_block = NULL;
1790 /* Write list before version */
1791 smp_wmb();
1792 ram_list.version++;
1793 call_rcu(block, reclaim_ramblock, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001794 qemu_mutex_unlock_ramlist();
bellarde9a1ab12007-02-08 23:08:38 +00001795}
1796
Huang Yingcd19cfa2011-03-02 08:56:19 +01001797#ifndef _WIN32
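/* Re-mmap the pages backing [addr, addr + length) in place, e.g. to
 * recover after the host memory backing a guest page was poisoned.
 */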
1798void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1799{
1800 RAMBlock *block;
1801 ram_addr_t offset;
1802 int flags;
1803 void *area, *vaddr;
1804
Mike Day0dc3f442013-09-05 14:41:35 -04001805 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001806 offset = addr - block->offset;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001807 if (offset < block->max_length) {
Michael S. Tsirkin1240be22014-11-12 11:44:41 +02001808 vaddr = ramblock_ptr(block, offset);
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001809 if (block->flags & RAM_PREALLOC) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001810 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001811 } else if (xen_enabled()) {
1812 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001813 } else {
1814 flags = MAP_FIXED;
Markus Armbruster3435f392013-07-31 15:11:07 +02001815 if (block->fd >= 0) {
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001816 flags |= (block->flags & RAM_SHARED ?
1817 MAP_SHARED : MAP_PRIVATE);
Markus Armbruster3435f392013-07-31 15:11:07 +02001818 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1819 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001820 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001821 /*
1822 * Remap needs to match alloc. Accelerators that
1823 * set phys_mem_alloc never remap. If they did,
1824 * we'd need a remap hook here.
1825 */
1826 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1827
Huang Yingcd19cfa2011-03-02 08:56:19 +01001828 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1829 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1830 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001831 }
1832 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001833 fprintf(stderr, "Could not remap addr: "
1834 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001835 length, addr);
1836 exit(1);
1837 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001838 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001839 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001840 }
Huang Yingcd19cfa2011-03-02 08:56:19 +01001841 }
1842 }
1843}
1844#endif /* !_WIN32 */
1845
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001846/* Return a host pointer to ram allocated with qemu_ram_alloc.
Mike Dayae3a7042013-09-05 14:41:35 -04001847 * This should not be used for general purpose DMA. Use address_space_map
1848 * or address_space_rw instead. For local memory (e.g. video ram) that the
1849 * device owns, use memory_region_get_ram_ptr.
Mike Day0dc3f442013-09-05 14:41:35 -04001850 *
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001851 * Called within RCU critical section.
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001852 */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001853void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr)
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001854{
Gonglei3655cb92016-02-20 10:35:20 +08001855 RAMBlock *block = ram_block;
1856
1857 if (block == NULL) {
1858 block = qemu_get_ram_block(addr);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001859 addr -= block->offset;
Gonglei3655cb92016-02-20 10:35:20 +08001860 }
Mike Dayae3a7042013-09-05 14:41:35 -04001861
1862 if (xen_enabled() && block->host == NULL) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001863 /* We need to check if the requested address is in the RAM
1864 * because we don't want to map the entire memory in QEMU.
1865 * In that case just map until the end of the page.
1866 */
1867 if (block->offset == 0) {
Paolo Bonzini49b24af2015-12-16 10:30:47 +01001868 return xen_map_cache(addr, 0, 0);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001869 }
Mike Dayae3a7042013-09-05 14:41:35 -04001870
1871 block->host = xen_map_cache(block->offset, block->max_length, 1);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001872 }
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001873 return ramblock_ptr(block, addr);
pbrookdc828ca2009-04-09 22:21:07 +00001874}
1875
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001876/* Return a host pointer to guest's ram. Similar to qemu_map_ram_ptr
Mike Dayae3a7042013-09-05 14:41:35 -04001877 * but takes a size argument.
Mike Day0dc3f442013-09-05 14:41:35 -04001878 *
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001879 * Called within RCU critical section.
Mike Dayae3a7042013-09-05 14:41:35 -04001880 */
Gonglei3655cb92016-02-20 10:35:20 +08001881static void *qemu_ram_ptr_length(RAMBlock *ram_block, ram_addr_t addr,
1882 hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001883{
Gonglei3655cb92016-02-20 10:35:20 +08001884 RAMBlock *block = ram_block;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001885 if (*size == 0) {
1886 return NULL;
1887 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001888
Gonglei3655cb92016-02-20 10:35:20 +08001889 if (block == NULL) {
1890 block = qemu_get_ram_block(addr);
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001891 addr -= block->offset;
Gonglei3655cb92016-02-20 10:35:20 +08001892 }
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001893 *size = MIN(*size, block->max_length - addr);
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001894
1895 if (xen_enabled() && block->host == NULL) {
1896 /* We need to check if the requested address is in the RAM
1897 * because we don't want to map the entire memory in QEMU.
1898 * In that case just map the requested area.
1899 */
1900 if (block->offset == 0) {
1901 return xen_map_cache(addr, *size, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001902 }
1903
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001904 block->host = xen_map_cache(block->offset, block->max_length, 1);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001905 }
Paolo Bonzinie81bcda2015-12-16 10:31:26 +01001906
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01001907 return ramblock_ptr(block, addr);
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001908}
1909
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001910/*
1911 * Translates a host ptr back to a RAMBlock, a ram_addr and an offset
1912 * in that RAMBlock.
1913 *
1914 * ptr: Host pointer to look up
1915 * round_offset: If true round the result offset down to a page boundary
1917 * *offset: set to result offset within the RAMBlock
1918 *
1919 * Returns: RAMBlock (or NULL if not found)
Mike Dayae3a7042013-09-05 14:41:35 -04001920 *
1921 * By the time this function returns, the returned pointer is not protected
1922 * by RCU anymore. If the caller is not within an RCU critical section and
1923 * does not hold the iothread lock, it must have other means of protecting the
1924 * pointer, such as a reference to the region that includes the incoming
1925 * ram_addr_t.
1926 */
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001927RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001928 ram_addr_t *offset)
pbrook5579c7f2009-04-11 14:47:08 +00001929{
pbrook94a6b542009-04-11 17:15:54 +00001930 RAMBlock *block;
1931 uint8_t *host = ptr;
1932
Jan Kiszka868bb332011-06-21 22:59:09 +02001933 if (xen_enabled()) {
Paolo Bonzinif615f392016-05-26 10:07:50 +02001934 ram_addr_t ram_addr;
Mike Day0dc3f442013-09-05 14:41:35 -04001935 rcu_read_lock();
Paolo Bonzinif615f392016-05-26 10:07:50 +02001936 ram_addr = xen_ram_addr_from_mapcache(ptr);
1937 block = qemu_get_ram_block(ram_addr);
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001938 if (block) {
Anthony PERARDd6b6aec2016-06-09 16:56:17 +01001939 *offset = ram_addr - block->offset;
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001940 }
Mike Day0dc3f442013-09-05 14:41:35 -04001941 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001942 return block;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001943 }
1944
Mike Day0dc3f442013-09-05 14:41:35 -04001945 rcu_read_lock();
1946 block = atomic_rcu_read(&ram_list.mru_block);
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001947 if (block && block->host && host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001948 goto found;
1949 }
1950
Mike Day0dc3f442013-09-05 14:41:35 -04001951 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001952 /* This case happens when the block is not mapped. */
1953 if (block->host == NULL) {
1954 continue;
1955 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001956 if (host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001957 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001958 }
pbrook94a6b542009-04-11 17:15:54 +00001959 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001960
Mike Day0dc3f442013-09-05 14:41:35 -04001961 rcu_read_unlock();
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001962 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001963
1964found:
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001965 *offset = (host - block->host);
1966 if (round_offset) {
1967 *offset &= TARGET_PAGE_MASK;
1968 }
Mike Day0dc3f442013-09-05 14:41:35 -04001969 rcu_read_unlock();
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001970 return block;
1971}
1972
Dr. David Alan Gilberte3dd7492015-11-05 18:10:33 +00001973/*
1974 * Finds the named RAMBlock
1975 *
1976 * name: The name of RAMBlock to find
1977 *
1978 * Returns: RAMBlock (or NULL if not found)
1979 */
1980RAMBlock *qemu_ram_block_by_name(const char *name)
1981{
1982 RAMBlock *block;
1983
1984 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1985 if (!strcmp(name, block->idstr)) {
1986 return block;
1987 }
1988 }
1989
1990 return NULL;
1991}
1992
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001993/* Some of the softmmu routines need to translate from a host pointer
1994 (typically a TLB entry) back to a ram offset. */
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01001995ram_addr_t qemu_ram_addr_from_host(void *ptr)
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001996{
1997 RAMBlock *block;
Paolo Bonzinif615f392016-05-26 10:07:50 +02001998 ram_addr_t offset;
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00001999
Paolo Bonzinif615f392016-05-26 10:07:50 +02002000 block = qemu_ram_block_from_host(ptr, false, &offset);
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00002001 if (!block) {
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01002002 return RAM_ADDR_INVALID;
Dr. David Alan Gilbert422148d2015-11-05 18:10:32 +00002003 }
2004
Paolo Bonzini07bdaa42016-03-25 12:55:08 +01002005 return block->offset + offset;
Marcelo Tosattie8902612010-10-11 15:31:19 -03002006}
Alex Williamsonf471a172010-06-11 11:11:42 -06002007
Paolo Bonzini49b24af2015-12-16 10:30:47 +01002008/* Called within RCU critical section. */
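/* Write handler for pages holding translated code: invalidate the TBs in
 * the written range, perform the store, then mark the page dirty so the
 * slow path goes away once no translated code remains.
 */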
Avi Kivitya8170e52012-10-23 12:30:10 +02002009static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002010 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00002011{
Alex Bennéeba051fb2016-10-27 16:10:16 +01002012 bool locked = false;
2013
Juan Quintela52159192013-10-08 12:44:04 +02002014 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Alex Bennéeba051fb2016-10-27 16:10:16 +01002015 locked = true;
2016 tb_lock();
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002017 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00002018 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002019 switch (size) {
2020 case 1:
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002021 stb_p(qemu_map_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002022 break;
2023 case 2:
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002024 stw_p(qemu_map_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002025 break;
2026 case 4:
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01002027 stl_p(qemu_map_ram_ptr(NULL, ram_addr), val);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002028 break;
2029 default:
2030 abort();
2031 }
Alex Bennéeba051fb2016-10-27 16:10:16 +01002032
2033 if (locked) {
2034 tb_unlock();
2035 }
2036
Paolo Bonzini58d27072015-03-23 11:56:01 +01002037 /* Set both VGA and migration bits for simplicity and to remove
2038 * the notdirty callback faster.
2039 */
2040 cpu_physical_memory_set_dirty_range(ram_addr, size,
2041 DIRTY_CLIENTS_NOCODE);
bellardf23db162005-08-21 19:12:28 +00002042 /* we remove the notdirty callback only if the code has been
2043 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02002044 if (!cpu_physical_memory_is_clean(ram_addr)) {
Peter Crosthwaitebcae01e2015-09-10 22:39:42 -07002045 tlb_set_dirty(current_cpu, current_cpu->mem_io_vaddr);
Andreas Färber4917cf42013-05-27 05:17:50 +02002046 }
bellard1ccde1c2004-02-06 19:46:14 +00002047}
2048
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002049static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
2050 unsigned size, bool is_write)
2051{
2052 return is_write;
2053}
2054
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002055static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002056 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02002057 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002058 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00002059};
2060
pbrook0f459d12008-06-09 00:20:13 +00002061/* Generate a debug exception if a watchpoint has been hit. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002062static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
pbrook0f459d12008-06-09 00:20:13 +00002063{
Andreas Färber93afead2013-08-26 03:41:01 +02002064 CPUState *cpu = current_cpu;
Sergey Fedorov568496c2016-02-11 11:17:32 +00002065 CPUClass *cc = CPU_GET_CLASS(cpu);
Andreas Färber93afead2013-08-26 03:41:01 +02002066 CPUArchState *env = cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00002067 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00002068 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00002069 CPUWatchpoint *wp;
Emilio G. Cota89fee742016-04-07 13:19:22 -04002070 uint32_t cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00002071
Andreas Färberff4700b2013-08-26 18:23:18 +02002072 if (cpu->watchpoint_hit) {
aliguori06d55cc2008-11-18 20:24:06 +00002073 /* We re-entered the check after replacing the TB. Now raise
2074 * the debug interrupt so that it will trigger after the
2075 * current instruction. */
Andreas Färber93afead2013-08-26 03:41:01 +02002076 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00002077 return;
2078 }
Andreas Färber93afead2013-08-26 03:41:01 +02002079 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Andreas Färberff4700b2013-08-26 18:23:18 +02002080 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Peter Maydell05068c02014-09-12 14:06:48 +01002081 if (cpu_watchpoint_address_matches(wp, vaddr, len)
2082 && (wp->flags & flags)) {
Peter Maydell08225672014-09-12 14:06:48 +01002083 if (flags == BP_MEM_READ) {
2084 wp->flags |= BP_WATCHPOINT_HIT_READ;
2085 } else {
2086 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
2087 }
2088 wp->hitaddr = vaddr;
Peter Maydell66b9b432015-04-26 16:49:24 +01002089 wp->hitattrs = attrs;
Andreas Färberff4700b2013-08-26 18:23:18 +02002090 if (!cpu->watchpoint_hit) {
Sergey Fedorov568496c2016-02-11 11:17:32 +00002091 if (wp->flags & BP_CPU &&
2092 !cc->debug_check_watchpoint(cpu, wp)) {
2093 wp->flags &= ~BP_WATCHPOINT_HIT;
2094 continue;
2095 }
Andreas Färberff4700b2013-08-26 18:23:18 +02002096 cpu->watchpoint_hit = wp;
KONRAD Frederica5e99822016-10-27 16:10:06 +01002097
2098 /* The tb_lock will be reset when cpu_loop_exit or
2099 * cpu_loop_exit_noexc longjmp back into the cpu_exec
2100 * main loop.
2101 */
2102 tb_lock();
Andreas Färber239c51a2013-09-01 17:12:23 +02002103 tb_check_watchpoint(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002104 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
Andreas Färber27103422013-08-26 08:31:06 +02002105 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +02002106 cpu_loop_exit(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002107 } else {
2108 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
Andreas Färber648f0342013-09-01 17:43:17 +02002109 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
Peter Maydell6886b982016-05-17 15:18:04 +01002110 cpu_loop_exit_noexc(cpu);
aliguori6e140f22008-11-18 20:37:55 +00002111 }
aliguori06d55cc2008-11-18 20:24:06 +00002112 }
aliguori6e140f22008-11-18 20:37:55 +00002113 } else {
2114 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00002115 }
2116 }
2117}
2118
pbrook6658ffb2007-03-16 23:58:11 +00002119/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2120 so these check for a hit then pass through to the normal out-of-line
2121 phys routines. */
Peter Maydell66b9b432015-04-26 16:49:24 +01002122static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
2123 unsigned size, MemTxAttrs attrs)
pbrook6658ffb2007-03-16 23:58:11 +00002124{
Peter Maydell66b9b432015-04-26 16:49:24 +01002125 MemTxResult res;
2126 uint64_t data;
Peter Maydell79ed0412016-01-21 14:15:06 +00002127 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2128 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
pbrook6658ffb2007-03-16 23:58:11 +00002129
Peter Maydell66b9b432015-04-26 16:49:24 +01002130 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
Avi Kivity1ec9b902012-01-02 12:47:48 +02002131 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04002132 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002133 data = address_space_ldub(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002134 break;
2135 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002136 data = address_space_lduw(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002137 break;
2138 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002139 data = address_space_ldl(as, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04002140 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02002141 default: abort();
2142 }
Peter Maydell66b9b432015-04-26 16:49:24 +01002143 *pdata = data;
2144 return res;
2145}
2146
2147static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
2148 uint64_t val, unsigned size,
2149 MemTxAttrs attrs)
2150{
2151 MemTxResult res;
Peter Maydell79ed0412016-01-21 14:15:06 +00002152 int asidx = cpu_asidx_from_attrs(current_cpu, attrs);
2153 AddressSpace *as = current_cpu->cpu_ases[asidx].as;
Peter Maydell66b9b432015-04-26 16:49:24 +01002154
2155 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
2156 switch (size) {
2157 case 1:
Peter Maydell79ed0412016-01-21 14:15:06 +00002158 address_space_stb(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002159 break;
2160 case 2:
Peter Maydell79ed0412016-01-21 14:15:06 +00002161 address_space_stw(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002162 break;
2163 case 4:
Peter Maydell79ed0412016-01-21 14:15:06 +00002164 address_space_stl(as, addr, val, attrs, &res);
Peter Maydell66b9b432015-04-26 16:49:24 +01002165 break;
2166 default: abort();
2167 }
2168 return res;
pbrook6658ffb2007-03-16 23:58:11 +00002169}
2170
Avi Kivity1ec9b902012-01-02 12:47:48 +02002171static const MemoryRegionOps watch_mem_ops = {
Peter Maydell66b9b432015-04-26 16:49:24 +01002172 .read_with_attrs = watch_mem_read,
2173 .write_with_attrs = watch_mem_write,
Avi Kivity1ec9b902012-01-02 12:47:48 +02002174 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00002175};
pbrook6658ffb2007-03-16 23:58:11 +00002176
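/* Subpage accessors: forward the access to the subpage's address space at
 * base + addr, bouncing the data through a local buffer.
 */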
Peter Maydellf25a49e2015-04-26 16:49:24 +01002177static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
2178 unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00002179{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002180 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002181 uint8_t buf[8];
Peter Maydell5c9eb022015-04-26 16:49:24 +01002182 MemTxResult res;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002183
blueswir1db7b5422007-05-26 17:36:03 +00002184#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002185 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002186 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00002187#endif
Peter Maydell5c9eb022015-04-26 16:49:24 +01002188 res = address_space_read(subpage->as, addr + subpage->base,
2189 attrs, buf, len);
2190 if (res) {
2191 return res;
    }
    switch (len) {
    case 1:
        *data = ldub_p(buf);
        return MEMTX_OK;
    case 2:
        *data = lduw_p(buf);
        return MEMTX_OK;
    case 4:
        *data = ldl_p(buf);
        return MEMTX_OK;
    case 8:
        *data = ldq_p(buf);
        return MEMTX_OK;
    default:
        abort();
    }
}

static MemTxResult subpage_write(void *opaque, hwaddr addr,
                                 uint64_t value, unsigned len, MemTxAttrs attrs)
{
    subpage_t *subpage = opaque;
    uint8_t buf[8];

#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " value %"PRIx64"\n",
           __func__, subpage, len, addr, value);
#endif
    switch (len) {
    case 1:
        stb_p(buf, value);
        break;
    case 2:
        stw_p(buf, value);
        break;
    case 4:
        stl_p(buf, value);
        break;
    case 8:
        stq_p(buf, value);
        break;
    default:
        abort();
    }
    return address_space_write(subpage->as, addr + subpage->base,
                               attrs, buf, len);
}

static bool subpage_accepts(void *opaque, hwaddr addr,
                            unsigned len, bool is_write)
{
    subpage_t *subpage = opaque;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
           __func__, subpage, is_write ? 'w' : 'r', len, addr);
#endif

    return address_space_access_valid(subpage->as, addr + subpage->base,
                                      len, is_write);
}

static const MemoryRegionOps subpage_ops = {
    .read_with_attrs = subpage_read,
    .write_with_attrs = subpage_write,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .valid.accepts = subpage_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

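/*
 * A subpage_t covers one target page whose contents are not homogeneous:
 * its sub_section[] table maps each byte offset within the page to the
 * index of the MemoryRegionSection that really backs it, and subpage_ops
 * above bounce every access back through the owning AddressSpace at
 * subpage->base + addr, where normal dispatch takes over.
 */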
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t) + TARGET_PAGE_SIZE * sizeof(uint16_t));
    mmio->as = as;
    mmio->base = base;
    memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
                          NULL, TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);

    return mmio;
}

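/*
 * Reserve a slot in the section map for a background region covering the
 * whole address space.  These are added in mem_begin() in a fixed order,
 * so the PHYS_SECTION_* constants can be used directly as indices into
 * d->map.sections; the asserts in mem_begin() check exactly that invariant.
 */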
static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
                              MemoryRegion *mr)
{
    assert(as);
    MemoryRegionSection section = {
        .address_space = as,
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = int128_2_64(),
    };

    return phys_section_add(map, &section);
}

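/*
 * TCG's IOTLB entries carry a section index in the bits below the page
 * mask; masking with ~TARGET_PAGE_MASK below recovers that index and looks
 * the section up in the dispatch table that was current when the TLB entry
 * was installed (hence the RCU read of memory_dispatch).
 */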
MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index, MemTxAttrs attrs)
{
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
    AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
    MemoryRegionSection *sections = d->map.sections;

    return sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
                          NULL, UINT64_MAX);
    memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
                          NULL, UINT64_MAX);
}

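/*
 * Dispatch tables are rebuilt in two phases around a memory topology
 * change: mem_begin() starts a fresh AddressSpaceDispatch (seeded with the
 * four fixed background sections) as as->next_dispatch, mem_add() populates
 * it region by region, and mem_commit() publishes it with atomic_rcu_set(),
 * deferring destruction of the old table to an RCU grace period so that
 * lockless readers are never left with a dangling pointer.
 */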
static void mem_begin(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
    uint16_t n;

    n = dummy_section(&d->map, as, &io_mem_unassigned);
    assert(n == PHYS_SECTION_UNASSIGNED);
    n = dummy_section(&d->map, as, &io_mem_notdirty);
    assert(n == PHYS_SECTION_NOTDIRTY);
    n = dummy_section(&d->map, as, &io_mem_rom);
    assert(n == PHYS_SECTION_ROM);
    n = dummy_section(&d->map, as, &io_mem_watch);
    assert(n == PHYS_SECTION_WATCH);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
    d->as = as;
    as->next_dispatch = d;
}

static void address_space_dispatch_free(AddressSpaceDispatch *d)
{
    phys_sections_free(&d->map);
    g_free(d);
}

static void mem_commit(MemoryListener *listener)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *cur = as->dispatch;
    AddressSpaceDispatch *next = as->next_dispatch;

    phys_page_compact_all(next, next->map.nodes_nb);

    atomic_rcu_set(&as->dispatch, next);
    if (cur) {
        call_rcu(cur, address_space_dispatch_free, rcu);
    }
}

static void tcg_commit(MemoryListener *listener)
{
    CPUAddressSpace *cpuas;
    AddressSpaceDispatch *d;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
    cpu_reloading_memory_map();
    /* The CPU and TLB are protected by the iothread lock.
     * We reload the dispatch pointer now because cpu_reloading_memory_map()
     * may have split the RCU critical section.
     */
    d = atomic_rcu_read(&cpuas->as->dispatch);
    atomic_rcu_set(&cpuas->memory_dispatch, d);
    tlb_flush(cpuas->cpu, 1);
}

void address_space_init_dispatch(AddressSpace *as)
{
    as->dispatch = NULL;
    as->dispatch_listener = (MemoryListener) {
        .begin = mem_begin,
        .commit = mem_commit,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    memory_listener_register(&as->dispatch_listener, as);
}

void address_space_unregister(AddressSpace *as)
{
    memory_listener_unregister(&as->dispatch_listener);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    atomic_rcu_set(&as->dispatch, NULL);
    if (d) {
        call_rcu(d, address_space_dispatch_free, rcu);
    }
}

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));

    memory_region_init(system_memory, NULL, "system", UINT64_MAX);
    address_space_init(&address_space_memory, system_memory, "memory");

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
                          65536);
    address_space_init(&address_space_io, system_io, "I/O");
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else

static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
                                     hwaddr length)
{
    uint8_t dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    addr += memory_region_get_ram_addr(mr);

    /* No early return if dirty_log_mask is or becomes 0, because
     * cpu_physical_memory_set_dirty_range will still call
     * xen_modified_memory.
     */
    if (dirty_log_mask) {
        dirty_log_mask =
            cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
    }
    if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
        tb_lock();
        tb_invalidate_phys_range(addr, addr + length);
        tb_unlock();
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
    }
    cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
}

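/*
 * Clamp an access of l bytes at addr to what the region can take in one
 * dispatch.  addr & -addr isolates the lowest set bit of the address,
 * i.e. the largest power of two the address is aligned to: for a
 * hypothetical addr of 0x1006 that is 2, so an 8-byte access there is
 * split into 2-byte pieces unless the region advertises impl.unaligned.
 */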
static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
{
    unsigned access_size_max = mr->ops->valid.max_access_size;

    /* Regions are assumed to support 1-4 byte accesses unless
       otherwise specified.  */
    if (access_size_max == 0) {
        access_size_max = 4;
    }

    /* Bound the maximum access by the alignment of the address.  */
    if (!mr->ops->impl.unaligned) {
        unsigned align_size_max = addr & -addr;
        if (align_size_max != 0 && align_size_max < access_size_max) {
            access_size_max = align_size_max;
        }
    }

    /* Don't attempt accesses larger than the maximum.  */
    if (l > access_size_max) {
        l = access_size_max;
    }
    l = pow2floor(l);

    return l;
}

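/*
 * MMIO handlers are not necessarily thread-safe: unless a region opts out
 * by clearing mr->global_locking, its dispatch must run under the global
 * "iothread" mutex.  prepare_mmio_access() takes that lock on demand and
 * tells the caller, via the return value, whether it must drop it again
 * after the access; it also flushes any coalesced MMIO buffered so far,
 * so writes are observed in order.
 */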
static bool prepare_mmio_access(MemoryRegion *mr)
{
    bool unlocked = !qemu_mutex_iothread_locked();
    bool release_lock = false;

    if (unlocked && mr->global_locking) {
        qemu_mutex_lock_iothread();
        unlocked = false;
        release_lock = true;
    }
    if (mr->flush_coalesced_mmio) {
        if (unlocked) {
            qemu_mutex_lock_iothread();
        }
        qemu_flush_coalesced_mmio_buffer();
        if (unlocked) {
            qemu_mutex_unlock_iothread();
        }
    }

    return release_lock;
}

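/*
 * The write path below walks the translated ranges one MemoryRegion at a
 * time: MMIO accesses are split to the size the device accepts
 * (memory_access_size) and dispatched word by word, RAM is memcpy'd
 * directly, and the remainder of the buffer is re-translated each time
 * the loop crosses into the next region.  The read path further down
 * mirrors this structure.
 */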
/* Called within RCU critical section. */
static MemTxResult address_space_write_continue(AddressSpace *as, hwaddr addr,
                                                MemTxAttrs attrs,
                                                const uint8_t *buf,
                                                int len, hwaddr addr1,
                                                hwaddr l, MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, true)) {
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            /* XXX: could force current_cpu to NULL to avoid
               potential bugs */
            switch (l) {
            case 8:
                /* 64 bit write access */
                val = ldq_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 8,
                                                       attrs);
                break;
            case 4:
                /* 32 bit write access */
                val = ldl_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 4,
                                                       attrs);
                break;
            case 2:
                /* 16 bit write access */
                val = lduw_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 2,
                                                       attrs);
                break;
            case 1:
                /* 8 bit write access */
                val = ldub_p(buf);
                result |= memory_region_dispatch_write(mr, addr1, val, 1,
                                                       attrs);
                break;
            default:
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(mr, addr1, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
    }

    return result;
}

MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                                const uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);
        result = address_space_write_continue(as, addr, attrs, buf, len,
                                              addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}

/* Called within RCU critical section. */
MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
                                        MemTxAttrs attrs, uint8_t *buf,
                                        int len, hwaddr addr1, hwaddr l,
                                        MemoryRegion *mr)
{
    uint8_t *ptr;
    uint64_t val;
    MemTxResult result = MEMTX_OK;
    bool release_lock = false;

    for (;;) {
        if (!memory_access_is_direct(mr, false)) {
            /* I/O case */
            release_lock |= prepare_mmio_access(mr);
            l = memory_access_size(mr, l, addr1);
            switch (l) {
            case 8:
                /* 64 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 8,
                                                      attrs);
                stq_p(buf, val);
                break;
            case 4:
                /* 32 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 4,
                                                      attrs);
                stl_p(buf, val);
                break;
            case 2:
                /* 16 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 2,
                                                      attrs);
                stw_p(buf, val);
                break;
            case 1:
                /* 8 bit read access */
                result |= memory_region_dispatch_read(mr, addr1, &val, 1,
                                                      attrs);
                stb_p(buf, val);
                break;
            default:
                abort();
            }
        } else {
            /* RAM case */
            ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
            memcpy(buf, ptr, l);
        }

        if (release_lock) {
            qemu_mutex_unlock_iothread();
            release_lock = false;
        }

        len -= l;
        buf += l;
        addr += l;

        if (!len) {
            break;
        }

        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
    }

    return result;
}

MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs, uint8_t *buf, int len)
{
    hwaddr l;
    hwaddr addr1;
    MemoryRegion *mr;
    MemTxResult result = MEMTX_OK;

    if (len > 0) {
        rcu_read_lock();
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, false);
        result = address_space_read_continue(as, addr, attrs, buf, len,
                                             addr1, l, mr);
        rcu_read_unlock();
    }

    return result;
}

MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
                             uint8_t *buf, int len, bool is_write)
{
    if (is_write) {
        return address_space_write(as, addr, attrs, (uint8_t *)buf, len);
    } else {
        return address_space_read(as, addr, attrs, (uint8_t *)buf, len);
    }
}

void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
                     buf, len, is_write);
}

enum write_rom_type {
    WRITE_DATA,
    FLUSH_CACHE,
};

static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
    hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
{
    hwaddr l;
    uint8_t *ptr;
    hwaddr addr1;
    MemoryRegion *mr;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &addr1, &l, true);

        if (!(memory_region_is_ram(mr) ||
              memory_region_is_romd(mr))) {
            l = memory_access_size(mr, l, addr1);
        } else {
            /* ROM/RAM case */
            ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
            switch (type) {
            case WRITE_DATA:
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(mr, addr1, l);
                break;
            case FLUSH_CACHE:
                flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
                break;
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
    rcu_read_unlock();
}

/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
                                   const uint8_t *buf, int len)
{
    cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
}

void cpu_flush_icache_range(hwaddr start, int len)
{
    /*
     * This function should do the same thing as an icache flush that was
     * triggered from within the guest. For TCG we are always cache coherent,
     * so there is no need to flush anything. For KVM / Xen we need to flush
     * the host's instruction cache at least.
     */
    if (tcg_enabled()) {
        return;
    }

    cpu_physical_memory_write_rom_internal(&address_space_memory,
                                           start, NULL, len, FLUSH_CACHE);
}

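/*
 * When address_space_map() hits a region that cannot be mapped directly
 * (MMIO), the data is staged in this single, statically allocated bounce
 * buffer instead.  Only one mapping can use it at a time; contenders get
 * NULL back and can register a map client below to be notified when the
 * buffer frees up.
 */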
typedef struct {
    MemoryRegion *mr;
    void *buffer;
    hwaddr addr;
    hwaddr len;
    bool in_use;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    QEMUBH *bh;
    QLIST_ENTRY(MapClient) link;
} MapClient;

QemuMutex map_client_list_lock;
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

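/*
 * Map clients are one-shot: once the bounce buffer is released, every
 * queued client has its bottom half scheduled and is removed from the
 * list, so a caller that wants to be notified again must re-register.
 */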
static void cpu_unregister_map_client_do(MapClient *client)
{
    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients_locked(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        qemu_bh_schedule(client->bh);
        cpu_unregister_map_client_do(client);
    }
}

void cpu_register_map_client(QEMUBH *bh)
{
    MapClient *client = g_malloc(sizeof(*client));

    qemu_mutex_lock(&map_client_list_lock);
    client->bh = bh;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    if (!atomic_read(&bounce.in_use)) {
        cpu_notify_map_clients_locked();
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

void cpu_exec_init_all(void)
{
    qemu_mutex_init(&ram_list.mutex);
    /* The data structures we set up here depend on knowing the page size,
     * so no more changes can be made after this point.
     * In an ideal world, nothing we did before we had finished the
     * machine setup would care about the target page size, and we could
     * do this much later, rather than requiring board models to state
     * up front what their requirements are.
     */
    finalize_target_page_bits();
    io_mem_init();
    memory_map_init();
    qemu_mutex_init(&map_client_list_lock);
}

void cpu_unregister_map_client(QEMUBH *bh)
{
    MapClient *client;

    qemu_mutex_lock(&map_client_list_lock);
    QLIST_FOREACH(client, &map_client_list, link) {
        if (client->bh == bh) {
            cpu_unregister_map_client_do(client);
            break;
        }
    }
    qemu_mutex_unlock(&map_client_list_lock);
}

static void cpu_notify_map_clients(void)
{
    qemu_mutex_lock(&map_client_list_lock);
    cpu_notify_map_clients_locked();
    qemu_mutex_unlock(&map_client_list_lock);
}

bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
{
    MemoryRegion *mr;
    hwaddr l, xlat;

    rcu_read_lock();
    while (len > 0) {
        l = len;
        mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (!memory_access_is_direct(mr, is_write)) {
            l = memory_access_size(mr, l, addr);
            if (!memory_region_access_valid(mr, xlat, l, is_write)) {
                /* Don't leak the RCU read lock on the early exit.  */
                rcu_read_unlock();
                return false;
            }
        }

        len -= l;
        addr += l;
    }
    rcu_read_unlock();
    return true;
}

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    hwaddr len = *plen;
    hwaddr done = 0;
    hwaddr l, xlat, base;
    MemoryRegion *mr, *this_mr;
    void *ptr;

    if (len == 0) {
        return NULL;
    }

    l = len;
    rcu_read_lock();
    mr = address_space_translate(as, addr, &xlat, &l, is_write);

    if (!memory_access_is_direct(mr, is_write)) {
        if (atomic_xchg(&bounce.in_use, true)) {
            rcu_read_unlock();
            return NULL;
        }
        /* Avoid unbounded allocations */
        l = MIN(l, TARGET_PAGE_SIZE);
        bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
        bounce.addr = addr;
        bounce.len = l;

        memory_region_ref(mr);
        bounce.mr = mr;
        if (!is_write) {
            address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
                               bounce.buffer, l);
        }

        rcu_read_unlock();
        *plen = l;
        return bounce.buffer;
    }

    base = xlat;

    for (;;) {
        len -= l;
        addr += l;
        done += l;
        if (len == 0) {
            break;
        }

        l = len;
        this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
        if (this_mr != mr || xlat != base + done) {
            break;
        }
    }

    memory_region_ref(mr);
    *plen = done;
    ptr = qemu_ram_ptr_length(mr->ram_block, base, plen);
    rcu_read_unlock();

    return ptr;
}

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        MemoryRegion *mr;
        ram_addr_t addr1;

        mr = memory_region_from_host(buffer, &addr1);
        assert(mr != NULL);
        if (is_write) {
            invalidate_and_set_dirty(mr, addr1, access_len);
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        memory_region_unref(mr);
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
                            bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    memory_region_unref(bounce.mr);
    atomic_mb_set(&bounce.in_use, false);
    cpu_notify_map_clients();
}

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}

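/*
 * A minimal usage sketch for the map/unmap pair above, e.g. from a device
 * model doing DMA; "dma_addr" and "size" are hypothetical values supplied
 * by the caller:
 *
 *     hwaddr plen = size;
 *     void *p = cpu_physical_memory_map(dma_addr, &plen, 1);
 *     if (p) {
 *         memset(p, 0, plen);   // device fills up to plen bytes,
 *                               // which may be less than size
 *         cpu_physical_memory_unmap(p, plen, 1, plen);
 *     } else {
 *         // direct mapping impossible and bounce buffer busy:
 *         // cpu_register_map_client() schedules a retry notification
 *     }
 */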
/* warning: addr must be aligned */
static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (l < 4 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

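/*
 * The wrappers below only pin the endianness argument; the *_phys variants
 * additionally pass MEMTXATTRS_UNSPECIFIED and discard the MemTxResult.
 * The same three-way pattern repeats for the 64-, 16- and 8-bit loads and
 * for the stores further down.
 */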
uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldl_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
                                                  MemTxAttrs attrs,
                                                  MemTxResult *result,
                                                  enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 8 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
                           MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_NATIVE_ENDIAN);
}

uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_LITTLE_ENDIAN);
}

uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_ldq_internal(as, addr, attrs, result,
                                      DEVICE_BIG_ENDIAN);
}

uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 1;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, false);
    if (!memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 1, attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        val = ldub_p(ptr);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline uint32_t address_space_lduw_internal(AddressSpace *as,
                                                   hwaddr addr,
                                                   MemTxAttrs attrs,
                                                   MemTxResult *result,
                                                   enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 false);
    if (l < 2 || !memory_access_is_direct(mr, false)) {
        release_lock |= prepare_mmio_access(mr);

        /* I/O case */
        r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
    return val;
}

uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_NATIVE_ENDIAN);
}

uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_LITTLE_ENDIAN);
}

uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result)
{
    return address_space_lduw_internal(as, addr, attrs, result,
                                       DEVICE_BIG_ENDIAN);
}

uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
{
    return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
                                MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 4;
    hwaddr addr1;
    MemTxResult r;
    uint8_t dirty_log_mask;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l,
                                 true);
    if (l < 4 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

        r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
    } else {
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        stl_p(ptr, val);

        dirty_log_mask = memory_region_get_dirty_log_mask(mr);
        dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
        cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
                                            4, dirty_log_mask);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

bellard8df1cd02005-01-28 22:37:22 +00003424/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003425static inline void address_space_stl_internal(AddressSpace *as,
3426 hwaddr addr, uint32_t val,
3427 MemTxAttrs attrs,
3428 MemTxResult *result,
3429 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003430{
bellard8df1cd02005-01-28 22:37:22 +00003431 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003432 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003433 hwaddr l = 4;
3434 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003435 MemTxResult r;
Jan Kiszka4840f102015-06-18 18:47:22 +02003436 bool release_lock = false;
bellard8df1cd02005-01-28 22:37:22 +00003437
Paolo Bonzini41063e12015-03-18 14:21:43 +01003438 rcu_read_lock();
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003439 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003440 true);
3441 if (l < 4 || !memory_access_is_direct(mr, true)) {
Jan Kiszka4840f102015-06-18 18:47:22 +02003442 release_lock |= prepare_mmio_access(mr);
Paolo Bonzini125b3802015-06-18 18:47:21 +02003443
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003444#if defined(TARGET_WORDS_BIGENDIAN)
3445 if (endian == DEVICE_LITTLE_ENDIAN) {
3446 val = bswap32(val);
3447 }
3448#else
3449 if (endian == DEVICE_BIG_ENDIAN) {
3450 val = bswap32(val);
3451 }
3452#endif
Peter Maydell50013112015-04-26 16:49:24 +01003453 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003454 } else {
bellard8df1cd02005-01-28 22:37:22 +00003455 /* RAM case */
Paolo Bonzini0878d0e2016-02-22 11:02:12 +01003456 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003457 switch (endian) {
3458 case DEVICE_LITTLE_ENDIAN:
3459 stl_le_p(ptr, val);
3460 break;
3461 case DEVICE_BIG_ENDIAN:
3462 stl_be_p(ptr, val);
3463 break;
3464 default:
3465 stl_p(ptr, val);
3466 break;
3467 }
Paolo Bonzini845b6212015-03-23 11:45:53 +01003468 invalidate_and_set_dirty(mr, addr1, 4);
Peter Maydell50013112015-04-26 16:49:24 +01003469 r = MEMTX_OK;
bellard8df1cd02005-01-28 22:37:22 +00003470 }
Peter Maydell50013112015-04-26 16:49:24 +01003471 if (result) {
3472 *result = r;
3473 }
Jan Kiszka4840f102015-06-18 18:47:22 +02003474 if (release_lock) {
3475 qemu_mutex_unlock_iothread();
3476 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003477 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003478}
3479
3480void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3481 MemTxAttrs attrs, MemTxResult *result)
3482{
3483 address_space_stl_internal(as, addr, val, attrs, result,
3484 DEVICE_NATIVE_ENDIAN);
3485}
3486
3487void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3488 MemTxAttrs attrs, MemTxResult *result)
3489{
3490 address_space_stl_internal(as, addr, val, attrs, result,
3491 DEVICE_LITTLE_ENDIAN);
3492}
3493
3494void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3495 MemTxAttrs attrs, MemTxResult *result)
3496{
3497 address_space_stl_internal(as, addr, val, attrs, result,
3498 DEVICE_BIG_ENDIAN);
bellard8df1cd02005-01-28 22:37:22 +00003499}
3500
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003501void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003502{
Peter Maydell50013112015-04-26 16:49:24 +01003503 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003504}
3505
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003506void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003507{
Peter Maydell50013112015-04-26 16:49:24 +01003508 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003509}
3510
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003511void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003512{
Peter Maydell50013112015-04-26 16:49:24 +01003513 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003514}
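
/*
 * Illustrative sketch (hypothetical helper): the address_space_* stores
 * report bus errors through MemTxResult, which the stl_phys convenience
 * wrappers above deliberately discard.  A caller that cares might look
 * like this.
 */
static void __attribute__((unused))
example_checked_stl(AddressSpace *as, hwaddr reg_addr, uint32_t val)
{
    MemTxResult res;

    /* Little-endian 32-bit store with an explicit result. */
    address_space_stl_le(as, reg_addr, val, MEMTXATTRS_UNSPECIFIED, &res);
    if (res != MEMTX_OK) {
        /* Nothing decoded the address, or the device failed the write. */
        fprintf(stderr, "stl to " TARGET_FMT_plx " failed\n", reg_addr);
    }
}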
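/*
 * Byte stores cannot be misaligned and have no endianness, so unlike the
 * stl/stw/stq families there is no _internal helper or _le/_be variant.
 */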
void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 1;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (!memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);
        r = memory_region_dispatch_write(mr, addr1, val, 1, attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        stb_p(ptr, val);
        invalidate_and_set_dirty(mr, addr1, 1);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

/* warning: addr must be aligned */
static inline void address_space_stw_internal(AddressSpace *as,
                                              hwaddr addr, uint32_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 2;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 2 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 2);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stw_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

static inline void address_space_stq_internal(AddressSpace *as,
                                              hwaddr addr, uint64_t val,
                                              MemTxAttrs attrs,
                                              MemTxResult *result,
                                              enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegion *mr;
    hwaddr l = 8;
    hwaddr addr1;
    MemTxResult r;
    bool release_lock = false;

    rcu_read_lock();
    mr = address_space_translate(as, addr, &addr1, &l, true);
    if (l < 8 || !memory_access_is_direct(mr, true)) {
        release_lock |= prepare_mmio_access(mr);

#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
        r = memory_region_dispatch_write(mr, addr1, val, 8, attrs);
    } else {
        /* RAM case */
        ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stq_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stq_be_p(ptr, val);
            break;
        default:
            stq_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(mr, addr1, 8);
        r = MEMTX_OK;
    }
    if (result) {
        *result = r;
    }
    if (release_lock) {
        qemu_mutex_unlock_iothread();
    }
    rcu_read_unlock();
}

void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
                       MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stq_internal(as, addr, val, attrs, result,
                               DEVICE_NATIVE_ENDIAN);
}

void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stq_internal(as, addr, val, attrs, result,
                               DEVICE_LITTLE_ENDIAN);
}

void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result)
{
    address_space_stq_internal(as, addr, val, attrs, result,
                               DEVICE_BIG_ENDIAN);
}

void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}

void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
{
    address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
}
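
/*
 * Illustrative sketch (hypothetical helper): MemTxAttrs can carry more
 * than MEMTXATTRS_UNSPECIFIED.  Here a 64-bit little-endian store is
 * issued as a TrustZone-style secure transaction.
 */
static void __attribute__((unused))
example_secure_stq(AddressSpace *as, hwaddr addr, uint64_t val)
{
    MemTxAttrs attrs = { .secure = 1 };   /* route to the secure view */

    address_space_stq_le(as, addr, val, attrs, NULL);
}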
3732
aliguori5e2972f2009-03-28 17:51:36 +00003733/* virtual memory access for debug (includes writing to ROM) */
Andreas Färberf17ec442013-06-29 19:40:58 +02003734int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00003735 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003736{
3737 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02003738 hwaddr phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00003739 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00003740
3741 while (len > 0) {
Peter Maydell5232e4c2016-01-21 14:15:06 +00003742 int asidx;
3743 MemTxAttrs attrs;
3744
bellard13eb76e2004-01-24 15:23:36 +00003745 page = addr & TARGET_PAGE_MASK;
Peter Maydell5232e4c2016-01-21 14:15:06 +00003746 phys_addr = cpu_get_phys_page_attrs_debug(cpu, page, &attrs);
3747 asidx = cpu_asidx_from_attrs(cpu, attrs);
bellard13eb76e2004-01-24 15:23:36 +00003748 /* if no physical page mapped, return an error */
3749 if (phys_addr == -1)
3750 return -1;
3751 l = (page + TARGET_PAGE_SIZE) - addr;
3752 if (l > len)
3753 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00003754 phys_addr += (addr & ~TARGET_PAGE_MASK);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003755 if (is_write) {
Peter Maydell5232e4c2016-01-21 14:15:06 +00003756 cpu_physical_memory_write_rom(cpu->cpu_ases[asidx].as,
3757 phys_addr, buf, l);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003758 } else {
Peter Maydell5232e4c2016-01-21 14:15:06 +00003759 address_space_rw(cpu->cpu_ases[asidx].as, phys_addr,
3760 MEMTXATTRS_UNSPECIFIED,
Peter Maydell5c9eb022015-04-26 16:49:24 +01003761 buf, l, 0);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003762 }
bellard13eb76e2004-01-24 15:23:36 +00003763 len -= l;
3764 buf += l;
3765 addr += l;
3766 }
3767 return 0;
3768}
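
/*
 * Illustrative sketch (hypothetical helper): cpu_memory_rw_debug is the
 * accessor the gdbstub uses for guest virtual memory; planting an x86
 * software breakpoint might look like this.
 */
static int __attribute__((unused))
example_insert_sw_breakpoint(CPUState *cpu, target_ulong pc)
{
    uint8_t int3 = 0xcc;   /* x86 INT3 opcode */

    /* is_write = 1: patch the instruction byte at the guest PC. */
    return cpu_memory_rw_debug(cpu, pc, &int3, sizeof(int3), 1);
}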

/*
 * Allows code that needs to deal with migration bitmaps etc. to still be
 * built target-independent.
 */
size_t qemu_target_page_bits(void)
{
    return TARGET_PAGE_BITS;
}
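
/*
 * Illustrative sketch: target-independent code derives the page size from
 * the exported bit count instead of using TARGET_PAGE_SIZE directly.
 */
static size_t __attribute__((unused))
example_target_page_size(void)
{
    return (size_t)1 << qemu_target_page_bits();
}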

#endif

/*
 * A helper function for the _utterly broken_ virtio device model to find
 * out if it's running on a big endian machine. Don't do this at home kids!
 */
bool target_words_bigendian(void);
bool target_words_bigendian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
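
/*
 * Illustrative sketch (hypothetical helper): a legacy virtio device
 * decides whether guest data must be byte-swapped by comparing guest and
 * host endianness.
 */
static bool __attribute__((unused))
example_legacy_virtio_needs_swap(void)
{
#ifdef HOST_WORDS_BIGENDIAN
    return !target_words_bigendian();
#else
    return target_words_bigendian();
#endif
}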

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegion *mr;
    hwaddr l = 1;
    bool res;

    rcu_read_lock();
    mr = address_space_translate(&address_space_memory,
                                 phys_addr, &phys_addr, &l, false);

    res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
    rcu_read_unlock();
    return res;
}
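
/*
 * Illustrative sketch (hypothetical helper): guest-memory dump code can
 * use the predicate above to skip device (MMIO) pages and copy only
 * RAM/ROM-backed ones.
 */
static bool __attribute__((unused))
example_page_is_dumpable(hwaddr paddr)
{
    return !cpu_physical_memory_is_io(paddr);
}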

int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        ret = func(block->idstr, block->host, block->offset,
                   block->used_length, opaque);
        if (ret) {
            break;
        }
    }
    rcu_read_unlock();
    return ret;
}
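
/*
 * Illustrative sketch (hypothetical callback): qemu_ram_foreach_block
 * calls the function once per RAMBlock until it returns non-zero.  This
 * callback totals guest RAM into a uint64_t passed via opaque, e.g.
 * qemu_ram_foreach_block(example_sum_ram, &total).
 */
static int __attribute__((unused))
example_sum_ram(const char *block_name, void *host_addr,
                ram_addr_t offset, ram_addr_t length, void *opaque)
{
    *(uint64_t *)opaque += length;
    return 0;   /* zero means keep iterating */
}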
#endif